prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59)
---|---|---
import unittest
import numpy as np
class TestSearch(unittest.TestCase):
def test_raise_on_empty_align(self):
from multicov.alignment import Alignment
from multicov.filtering import search
with self.assertRaises(ValueError):
search(Alignment(), 'ABC')
def test_raise_on_empty_seq(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import search
align = Alignment(['IVGGYTCQ', '-VGGTEAQ', 'IGG-KDT-'], alphabet=protein_alphabet)
with self.assertRaises(ValueError):
search(align, '')
def test_search_string(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import search
align = Alignment(['IVGGYTCQ', '-VGGTEAQ', 'IGG-KDT-'], alphabet=protein_alphabet)
self.assertEqual(search(align, 'VGGTEAQ'), 1)
def test_search_list(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import search
align = Alignment(['IVGGYTCQ', '-VGGTEAQ', 'IGG-KDT-'], alphabet=protein_alphabet)
self.assertEqual(search(align, ['I', 'G', 'G', 'K', 'D', 'T']), 2)
def test_search_approx(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import search
align = Alignment(['IVGGYTCQ', '-VGGTEAQ', 'IGG-KDT-'], alphabet=protein_alphabet)
self.assertEqual(search(align, 'IGGYTCQ'), 0)
def test_move_to_top(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import search
align = Alignment(['IVGGYTCQ', '-VGGTEAQ', 'IGG-KDT-'], alphabet=protein_alphabet)
search(align, ['I', 'G', 'G', 'K', 'D', 'T'], move_to_top=True)
self.assertTrue(np.array_equal(align.data, np.asmatrix([
['I', 'G', 'G', '-', 'K', 'D', 'T', '-'],
['-', 'V', 'G', 'G', 'T', 'E', 'A', 'Q'],
['I', 'V', 'G', 'G', 'Y', 'T', 'C', 'Q']
])))
def test_move_to_top_but_return_old_idx(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import search
align = Alignment(['IVGGYTCQ', '-VGGTEAQ', 'IGG-KDT-'], alphabet=protein_alphabet)
self.assertEqual(search(align, ['I', 'G', 'G', 'K', 'D', 'T'], move_to_top=True), 2)
def test_search_dna(self):
from multicov.alignment import Alignment
from multicov.alphabet import dna_alphabet
from multicov.filtering import search
align = Alignment(['ATACAT', 'GATACA', 'AA--GG'], dna_alphabet)
self.assertEqual(search(align, 'AAGG'), 2)
class TestFilterRows(unittest.TestCase):
def test_on_empty(self):
from multicov.alignment import Alignment
from multicov.filtering import filter_rows
align1 = Alignment()
align2 = filter_rows(Alignment())
self.assertEqual(align1, align2)
def test_on_protein(self):
from multicov.alignment import Alignment
from multicov.alphabet import protein_alphabet
from multicov.filtering import filter_rows
threshold = 1/21
align = Alignment([
'WKHNAYDMLSSDCQFESSHKHTQVCSAGMGYCAKPNNWGYW-LIVKMMW-CDYKQKLIYIAPLN',
'KHRCDANC-MLAN-SVIKYTHSACALIWTWNS-KIIRYFFVGAWFKEHFDSVPTAQACVCDSTP',
'LGVVGYYFKPCT-EVPSYSRFNVFHRIFPYLVYRVEE-NHTGHHVQ-KIVRNQYELRSIFDEHG',
'LIGDDHRN-LALCPS-T-GTTCCNWKWRSEWTMHSDTNCNPVAE--SYSKRCNDIGYITWINYA',
'CMPRYWYTYQYDCIFGWRFYSVYWPCLDDMFWQPYVDSMELF-NPMVATEWIMENCQGWG-N-K',
'QWFWRARPFE--FSC-C-PGP-GWVNLIDWMSCNKAMETLMRPYCNPYLKIQLPRSKNLLDDDG',
'VTMPEGHHCPAM-PLDLNGQR-KMWGSDFKKEDCKGYPEKFDCENLIDMDICLSLNTRPED-QR',
'LNYINMHVD-IGP-PCPQYDL--KFKCMYW-GQIEDV-NMQ-WKK-RTMDAVEQIVSMYHMSVE',
'WHV-EWKPVLC-PHWQFYM-VITEYVAMFQWCPPKGMASPKKGNLPRMFQSAKAIGAHRSDM-Y',
'PIWGGFNFPWID-GSQRQQR-EVTTGCDDFEHKYNPYLVPG-WEFGKYSNCWT-RCWRVNHDTV',
'PPCWVEAPYKPMGMWN-GRKV-NVAVWHHVIVL-DMYGLHLLRDWTMVKNAAHIFSHNMEMSNI',
'E-MWRGLIWSKGAY-YQNDNGTFNWPKQKHP-ARCSF-PTVNKDQNPGP-MVQMREFKSQQGQQ',
'RFGKFTCMGFRWKEYFTKQ-NPYKYRGIVHVKVQMIYSANGNLDWIDIPMIIRLKCPFGTRVTQ',
'CGRCGSH-EWL-NIMRNCKFIFWWRPTNAAHIWCARHESPKAD-QIAMTYRML-LDAHIIIVR-',
'T-PMVWRLVWYDHGCDPWMLIV-PIEPCVVKKPQYKDMERFSPDIKCHYLHDKDDGFWGSDKYI',
'LNCPYADLDGL-NPQR-FVVS-RCMRDGFRAVVRVSPDDLS-MWCKAGA-NTTV-DNRH-IVQW'
], protein_alphabet)
align_clean = filter_rows(align, max_gaps=threshold)
# noinspection PyTypeChecker
gap_fraction = np.mean(align.data == '-', axis=1)
# noinspection PyTypeChecker
gap_fraction_clean = np.mean(align_clean.data == '-', axis=1)
self.assertLess(len(align_clean), len(align))
self.assertLessEqual(np.max(gap_fraction_clean), threshold)
self.assertEqual( | np.sum(gap_fraction <= threshold) | numpy.sum |
"""
Code for working with the WThor database, available at
http://www.ffothello.org/informatique/la-base-wthor/.
"""
import logging
import os
from glob import glob
from typing import List, NamedTuple, Tuple, Union
import numpy as np # type: ignore
import tensorflow as tf # type: ignore
from absl import app, flags # type: ignore
from alphazero.data import serialize_example
from board import Bitboard, Board, GameOutcome, Loc, PlayerColor
DB_HEADER_BYTES = 16
GAME_BYTES = 68
GAME_HEADER_BYTES = 8
FLAGS = flags.FLAGS
flags.DEFINE_string(
"wthor_glob",
"resources/wthor/game_data/*.wtb",
"Glob specifying wthor files to convert.",
)
flags.DEFINE_string(
"out_dir", "resources/wthor/preprocessed/", "Directory to dump output files."
)
class GameState(NamedTuple):
board: Board
player: PlayerColor
move: Loc
# Format: (board [8x8x2 ndarray of (mine, opp)], move [x, y], value)
def to_data(self, winner: GameOutcome) -> Tuple[np.ndarray, Tuple[int, int], int]:
mine, opp = self.board.player_view(self.player)
board = np.dstack([mine.piecearray, opp.piecearray])
if winner == GameOutcome.DRAW:
value = 0
elif winner.value == self.player.value:
value = 1
else:
value = -1
return (board, (self.move.x, self.move.y), value)
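    # Illustrative note: `value` is from the perspective of the player to move,
    # so in a game that BLACK ultimately wins, every state with player == BLACK
    # maps to +1 and every WHITE state to -1; draws map to 0 for both sides.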
def __repr__(self) -> str:
return f"Next move: {self.player.value} plays {self.move}\n{self.board}"
class GameSummary(NamedTuple):
real_score: int
theoretical_score: int
states: List[GameState]
outcome: GameOutcome
def parse_move(move_encoding: int) -> Loc:
x = move_encoding % 10 - 1
y = move_encoding // 10 - 1
return Loc(x, y)
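# Illustrative decoding: WThor packs a move as 10*(row+1) + (column+1), so
# parse_move(11) == Loc(0, 0) (a corner square) and parse_move(56) == Loc(5, 4).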
def parse_game(game_bytes: bytes) -> GameSummary:
assert len(game_bytes) == GAME_BYTES
header_bytes = game_bytes[:GAME_HEADER_BYTES]
real_score = int(header_bytes[6])
theoretical_score = int(header_bytes[7])
move_bytes = game_bytes[GAME_HEADER_BYTES:]
board = Board.starting_board()
moves = list(map(parse_move, move_bytes))
player = PlayerColor.BLACK
states: List[GameState] = []
for move in moves:
if move == Loc.pass_loc():
break
states.append(GameState(board, player, move))
board = board.resolve_move(move, player)
if board.has_moves(player.opponent):
player = player.opponent
return GameSummary(
real_score=real_score,
theoretical_score=theoretical_score,
states=states,
outcome=board.winning_player,
)
def parse_db(filename: str) -> List[GameSummary]:
logging.info(f"Parsing database: {filename}")
with open(filename, "rb") as f:
db_bytes = f.read()
data_bytes = db_bytes[DB_HEADER_BYTES:]
summaries = []
for i in range(len(data_bytes) // GAME_BYTES):
game_bytes = data_bytes[i * GAME_BYTES : (i + 1) * GAME_BYTES] # noqa
summaries.append(parse_game(game_bytes))
return summaries
def make_dataset(
boards: np.ndarray, moves: np.ndarray, values: np.ndarray
) -> tf.data.Dataset:
def gen():
for i in range(boards.shape[0]):
black_bb = Bitboard.from_piecearray(boards[i, :, :, 0])
white_bb = Bitboard.from_piecearray(boards[i, :, :, 1])
board = Board.from_player_view(black_bb, white_bb, PlayerColor.BLACK)
move = Loc(moves[i, 0], moves[i, 1])
yield serialize_example(board, move, values[i])
return tf.data.Dataset.from_generator(gen, output_types=tf.string, output_shapes=())
def main(_):
logging.basicConfig(level=logging.INFO)
os.makedirs(FLAGS.out_dir, exist_ok=True)
db_files = glob(FLAGS.wthor_glob)
boards: Union[List[np.ndarray], np.ndarray] = []
moves: Union[List[Tuple[int, int]], np.ndarray] = []
values: Union[List[int], np.ndarray] = []
logging.info(f"Reading files: {db_files}")
logging.info(f"Writing files to: {FLAGS.out_dir}")
for filename in db_files:
games = parse_db(filename)
for game in games:
data_samples = map(lambda x: x.to_data(game.outcome), game.states)
new_boards, new_moves, new_values = zip(*data_samples)
boards.extend(new_boards)
moves.extend(new_moves)
values.extend(new_values)
boards = np.array(boards)
moves = | np.array(moves) | numpy.array |
"""
Classes for dealing with data products.
"""
import os
import warnings
import cwinpy
import lal
import lalpulsar
import numpy as np
from astropy.io import registry as io_registry
from gwpy.detector import Channel
from gwpy.io.mp import read_multi
from gwpy.plot.colors import GW_OBSERVATORY_COLORS
from gwpy.segments import SegmentList
from gwpy.timeseries import TimeSeries, TimeSeriesBase
from gwpy.types import Series
from numba import jit
# import utility functions
from .utils import gcd_array, is_par_file, logfactorial
class MultiHeterodynedData(object):
"""
A class to contain time series' of heterodyned data, using the
:class:`~cwinpy.data.HeterodynedData` class, for multiple detectors/data
streams.
Parameters
----------
data: (str, array_like, dict, HeterodynedData)
The heterodyned data either as a string giving a file path, an array of
data, or a dictionary of file paths/data arrays, that are keyed on
valid detector names.
times: (array_like, dict)
If `data` is an array, or dictionary of arrays, then `times` must be
set giving the time stamps for the data values. If `times` is a
dictionary then it should be keyed on the same detector names as in
`data`.
detector: (str, lal.Detector)
If `data` is a file name or data array then `detector` must be given as
a string or :class:`lal.Detector`.
Notes
-----
See the :class:`~cwinpy.data.HeterodynedData` documentation for information
on additional keyword arguments.
"""
def __init__(
self,
data=None,
times=None,
detector=None,
window=30,
inject=False,
par=None,
injpar=None,
freqfactor=2.0,
bbthreshold="default",
remove_outliers=False,
thresh=3.5,
**kwargs,
):
# set keyword argument
self._heterodyned_data_kwargs = {}
self._heterodyned_data_kwargs["window"] = window
self._heterodyned_data_kwargs["par"] = par
self._heterodyned_data_kwargs["injpar"] = injpar
self._heterodyned_data_kwargs["inject"] = inject
self._heterodyned_data_kwargs["freqfactor"] = freqfactor
self._heterodyned_data_kwargs["bbthreshold"] = bbthreshold
self._heterodyned_data_kwargs["remove_outliers"] = remove_outliers
self._heterodyned_data_kwargs["thresh"] = thresh
self._data = dict() # initialise empty dict
self._currentidx = 0 # index for iterator
# add data
if data is not None:
self.add_data(data, times, detector=detector)
def add_data(self, data, times=None, detector=None):
"""
Add heterodyned data to the class.
Parameters
----------
data: (str, array_like, dict, HeterodynedData)
The heterodyned data either as a string giving a file path, an
array of data, a dictionary of file paths/data arrays that are
keyed on valid detector names, or a
:class:`~cwinpy.data.HeterodynedData` object.
times: (array_like, dict)
If `data` is an array, or dictionary of arrays, then `times` must
be set giving the time stamps for the data values. If `times` is
a dictionary then it should be keyed on the same detector names as
in `data`.
detector: (str, lal.Detector)
If `data` is a file name or data array then `detector` must be
given as a string or :class:`lal.Detector`.
"""
if isinstance(data, HeterodynedData):
if data.detector is None and detector is None:
raise ValueError("No detector is given!")
if data.detector is None and detector is not None:
data.detector = detector
self._add_HeterodynedData(data)
elif isinstance(data, dict):
for detkey in data:
if isinstance(data[detkey], HeterodynedData):
if data[detkey].detector is None:
data[detkey].detector = detkey
self._add_HeterodynedData(data[detkey])
else:
if isinstance(times, dict):
if detkey not in times:
raise KeyError(
"'times' does not contain the "
"detector: {}".format(detkey)
)
else:
dettimes = times[detkey]
else:
dettimes = times
self._add_data(data[detkey], detkey, dettimes)
else:
if isinstance(times, dict):
raise TypeError("'times' should not be a dictionary")
self._add_data(data, detector, times)
def _add_HeterodynedData(self, data):
detname = data.detector
if detname not in self._data:
self._data[detname] = [data] # add as a list
else:
# if data from that detector already exists then append to the list
self._data[detname].append(data)
def _add_data(self, data, detector, times=None):
if detector is None or data is None:
raise ValueError("data and detector must be set")
het = HeterodynedData(
data, times, detector=detector, **self._heterodyned_data_kwargs
)
self._add_HeterodynedData(het)
def __getitem__(self, det):
"""
Get the list of :class:`~cwinpy.data.HeterodynedData` objects keyed to
a given detector.
"""
if det in self.detectors:
return self._data[det]
else:
return None
def pop(self, det):
return self._data.pop(det)
@property
def to_list(self):
datalist = []
for key in self._data:
if isinstance(self._data[key], list):
datalist += self._data[key]
else:
datalist.append(self._data[key])
return datalist
@property
def detectors(self):
"""
Return the list of detectors contained in the object.
"""
return list(self._data.keys())
@property
def pars(self):
"""
Return the list of heterodyne source parameter files for each data set
contained in the object.
"""
return [het.par for het in self]
@property
def freq_factors(self):
"""
        Return the list of heterodyne frequency scaling factors for each data
set contained in the object.
"""
return [het.freq_factor for het in self]
@property
def injection_snr(self):
"""
Get the coherent optimal signal-to-noise ratio of an injected signal in
all heterodyned data sets. See
:meth:`cwinpy.data.HeterodynedData.injection_snr`.
"""
snr2 = 0.0
for het in self:
if het.injpar is not None:
snr2 += het.injection_snr ** 2
return np.sqrt(snr2)
def signal_snr(self, signalpar):
"""
Get the coherent signal-to-noise ratio of a given signal. See
:meth:`cwinpy.data.HeterodynedData.signal_snr`.
"""
snr2 = 0.0
for het in self:
snr2 += het.signal_snr(signalpar) ** 2
return np.sqrt(snr2)
def __iter__(self):
self._currentidx = 0 # reset iterator index
return self
def __next__(self):
if self._currentidx >= len(self):
raise StopIteration
else:
self._currentidx += 1
return self.to_list[self._currentidx - 1]
def plot(
self,
det=None,
together=False,
which="abs",
figsize=(12, 4),
remove_outliers=False,
thresh=3.5,
zero_time=True,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
**plotkwargs,
):
"""
Plot all, or some of, the time series' contained in the class. The
general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.plot` and additional arguments are
given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
from matplotlib import pyplot as pl
if len(self) == 0:
# nothing in the class!
return None
# set which plots to output
ndet = 1
if det is not None:
if det not in self.detectors:
raise ValueError("Detector {} is not in the class".format(det))
# get the number of time series' for the requested detector
ndet = len(self[det])
nplots = 1
if together:
if ndet > 1:
nplots = ndet
hets = self[det]
else:
nplots = len(self)
hets = self # datasets to plot
# create the figure
if figsize[0] == 12 and figsize[1] == 4:
# check default size and increase
figsize = (figsize[0], figsize[1] * nplots)
figs, axs = pl.subplots(nplots, 1, figsize=figsize)
            for ax, het in zip(np.atleast_1d(axs), hets):  # atleast_1d copes with nplots == 1
_ = het.plot(
which=which,
ax=ax,
remove_outliers=remove_outliers,
thresh=thresh,
zero_time=zero_time,
labelsize=labelsize,
fontsize=fontsize,
legendsize=legendsize,
fontname=fontname,
labelname=labelname,
**plotkwargs,
)
else:
# a list of figures
figs = []
if det is not None:
hets = self[det]
else:
hets = self
# loop over data and produce plots
for het in hets:
figs.append(
het.plot(
which=which,
figsize=figsize,
remove_outliers=remove_outliers,
thresh=thresh,
zero_time=zero_time,
labelsize=labelsize,
fontsize=fontsize,
legendsize=legendsize,
fontname=fontname,
labelname=labelname,
**plotkwargs,
)
)
return figs
def power_spectrum(
self,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
dt=None,
fraction_labels=None,
fraction_label_num=None,
average=None,
window=None,
overlap=None,
**plotkwargs,
):
"""
Plot all, or some of, the power spectra of the time series' contained
in the class. The general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.power_spectrum` and additional
arguments are given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
return self._plot_power(
"power",
det=det,
together=together,
figsize=figsize,
remove_outliers=remove_outliers,
thresh=thresh,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
dt=dt,
fraction_labels=fraction_labels,
fraction_label_num=fraction_label_num,
average=average,
window=window,
overlap=overlap,
**plotkwargs,
)
def periodogram(
self,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
fraction_labels=None,
fraction_label_num=None,
**plotkwargs,
):
"""
Plot all, or some of, the periodograms of the time series' contained
in the class. The general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.periodogram` and additional
arguments are given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
return self._plot_power(
"periodogram",
det=det,
together=together,
figsize=figsize,
remove_outliers=remove_outliers,
thresh=thresh,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
fraction_labels=fraction_labels,
fraction_label_num=fraction_label_num,
**plotkwargs,
)
def spectrogram(
self,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
fraction_labels=None,
fraction_label_num=None,
dt=None,
overlap=None,
window=None,
**plotkwargs,
):
"""
        Plot all, or some of, the spectrograms of the time series' contained
in the class. The general arguments can be seen in
:meth:`cwinpy.data.HeterodynedData.spectrogram` and additional
arguments are given below.
Parameters
----------
together: bool, False
Set to ``True`` to put all the plots onto one figure, otherwise
they will be created on individual
:class:`~matplotlib.figure.Figure` objects.
det: str
If a detector name is supplied, then only the time series' for that
detector will be plotted.
Returns
-------
list:
A :class:`~matplotlib.figure.Figure` object, or list of
:class:`~matplotlib.figure.Figure` objects.
"""
return self._plot_power(
"spectrogram",
det=det,
together=together,
figsize=figsize,
window=window,
remove_outliers=remove_outliers,
thresh=thresh,
labelsize=labelsize,
fontsize=fontsize,
labelname=labelname,
fontname=fontname,
dt=dt,
fraction_labels=fraction_labels,
fraction_label_num=fraction_label_num,
overlap=overlap,
**plotkwargs,
)
def _plot_power(
self,
plottype,
det=None,
together=False,
figsize=None,
remove_outliers=None,
thresh=None,
labelsize=None,
fontsize=None,
legendsize=None,
fontname=None,
labelname=None,
dt=None,
average=None,
overlap=None,
window=None,
fraction_labels=None,
fraction_label_num=None,
**plotkwargs,
):
"""
General purpose function for plotting the various spectrum figures.
Parameters
----------
plottype: str
The "spectrum" plots that are required: 'power_spectrum',
'periodogram', or 'spectrogram'
"""
from matplotlib import pyplot as pl
if plottype.lower() not in ["spectrogram", "periodogram", "power"]:
raise ValueError("Spectrum plot type is not known")
if len(self) == 0:
# nothing in the class!
return None
# set which plots to output
ndet = 1
if det is not None:
if det not in self.detectors:
raise ValueError("Detector {} is not in the class".format(det))
# get the number of time series' for the requested detector
ndet = len(self[det])
# set keyword arguments
speckwargs = {}
for key, value in zip(
[
"thresh",
"remove_outliers",
"labelsize",
"labelname",
"fontsize",
"fontname",
"legendsize",
"fraction_labels",
"fraction_label_num",
"figsize",
],
[
thresh,
remove_outliers,
labelsize,
labelname,
fontsize,
fontname,
legendsize,
fraction_labels,
fraction_label_num,
figsize,
],
):
if value is not None:
speckwargs[key] = value
if plottype.lower() == "power" and average is not None:
speckwargs["average"] = average
if plottype.lower() in ["spectrogram", "power"]:
if overlap is not None:
speckwargs["overlap"] = overlap
if window is not None:
speckwargs["window"] = window
if dt is not None:
speckwargs["dt"] = dt
nplots = 1
if together:
if ndet > 1:
nplots = ndet
hets = self[det]
else:
nplots = len(self)
hets = self # datasets to plot
# create the figure
if figsize is None:
# create default size
if plottype.lower() == "spectrogram":
figsize = (12, 4 * nplots)
else:
figsize = (6, 5 * nplots)
figs, axs = pl.subplots(nplots, 1, figsize=figsize)
            for ax, het in zip(np.atleast_1d(axs), hets):  # atleast_1d copes with nplots == 1
if plottype.lower() == "periodogram":
plfunc = het.periodogram
elif plottype.lower() == "power":
plfunc = het.power_spectrum
else:
plfunc = het.spectrogram
_ = plfunc(**speckwargs, ax=ax, **plotkwargs)
figs.tight_layout()
else:
# a list of figures
figs = []
if det is not None:
hets = self[det]
else:
hets = self
# loop over data and produce plots
for het in hets:
if plottype.lower() == "periodogram":
plfunc = het.periodogram
figidx = 2
elif plottype.lower() == "power":
plfunc = het.power_spectrum
figidx = 2
else:
plfunc = het.spectrogram
figidx = 3
figs.append(plfunc(**speckwargs, **plotkwargs)[figidx])
return figs
def __len__(self):
length = 0
for key in self._data:
if isinstance(self._data[key], list):
length += len(self._data[key])
else:
length += 1
return length
class HeterodynedData(TimeSeriesBase):
"""
A class to contain a time series of heterodyned data.
Some examples of input `data` are:
1. The path to a file containing (gzipped) ascii text with the
following three columns::
# GPS time stamps real strain imaginary strain
1000000000.0 2.3852e-25 3.4652e-26
1000000060.0 -1.2963e-26 9.7423e-25
1000000120.0 5.4852e-25 -1.8964e-25
...
or four columns::
# GPS time stamps real strain imaginary strain std. dev.
1000000000.0 2.3852e-25 3.4652e-26 1.0e-25
1000000060.0 -1.2963e-26 9.7423e-25 1.0e-25
1000000120.0 5.4852e-25 -1.8964e-25 1.0e-25
...
where any row that starts with a ``#`` or a ``%`` is considered a comment.
2. A 1-dimensional array of complex data, and accompanying array of `time`
values, e.g.,
>>> import numpy as np
>>> N = 100 # the data length
>>> data = np.random.randn(N) + 1j*np.random.randn(N)
>>> times = np.linspace(1000000000., 1000005940., N)
or, a 2-dimensional array with the real and complex values held in separate
columns, e.g.,
>>> import numpy as np
>>> N = 100 # the data length
>>> data = np.random.randn(N, 2)
>>> times = np.linspace(1000000000., 1000005940., N)
or, a 2-dimensional array with the real and complex values held in separate
columns, *and* a third column holding the standard deviation for each
entry, e.g.,
>>> import numpy as np
>>> N = 100 # the data length
>>> stds = np.ones(N) # standard deviations
>>> data = np.array([stds*np.random.randn(N),
>>> ... stds*np.random.randn(N), stds]).T
>>> times = np.linspace(1000000000., 1000005940., N)
Parameters
----------
data: (str, array_like)
A file (plain ascii text, gzipped ascii text, or HDF5 file) containing
a time series of heterodyned data, or an array containing the complex
heterodyned data.
times: array_like
If the data was passed using the `data` argument, then the associated
time stamps should be passed using this argument.
par: (str, lalpulsar.PulsarParametersPy)
A parameter file, or :class:`lalpulsar.PulsarParametersPy` object
containing the parameters with which the data was heterodyned.
detector: (str, lal.Detector)
A string, or lal.Detector object, identifying the detector from which
the data was generated.
window: int, 30
The length of a window used for calculating a running median over the
data. If set to zero the running median will just be initialised with
zero values.
inject: bool, False
Set to ``True`` to add a simulated signal to the data based on the
parameters supplied in `injpar`, or `par` if `injpar` is not given.
injpar: (str, lalpulsar.PulsarParametersPy)
A parameter file name or :class:`lalpulsar.PulsarParametersPy`
object containing values for the injected signal. A `par` file must
also have been provided, and the injected signal will assume that
the data has already been heterodyned using the parameters from
`par`, which could be different.
injtimes: list, None
A list containing pairs of times between which to add the simulated
signal. By default the signal will be added into the whole data set.
freqfactor: float, 2.0
The frequency scale factor for the data signal, e.g., a value of two
for emission from the l=m=2 mode at twice the rotation frequency of the
source.
fakeasd: (float, str)
        An amplitude spectral density value (in 1/sqrt(Hz)) at which to
generate simulated Gaussian noise to add to the data. Alternatively, if
a string is passed, and that string represents a known detector, then
the amplitude spectral density for that detector at design sensitivity
will be used (this requires a `par` value to be included, which
contains the source rotation frequency).
fakeseed: (int, class:`numpy.random.RandomState`), None
A seed for the random number generator used to create the fake data
(see :meth:`numpy.random.seed` and :class:`numpy.random.RandomState`
for more information).
issigma: bool
Set to ``True`` if the ``fakeasd`` value passed is actually a noise
standard deviation value rather than an amplitude spectral density.
bbthreshold: (str, float), "default"
The threshold method, or value for the
:meth:`~cwinpy.data.HeterodynedData.bayesian_blocks` function.
bbminlength: int, 5
The minimum length (in numbers of data points) of a chunk that the data
can be split into by the
        :meth:`~cwinpy.data.HeterodynedData.bayesian_blocks` function. To
        perform no splitting of the data, set this value to be larger than the
        total data length, e.g., ``inf``.
bbmaxlength: int, inf
The maximum length (in numbers of data points) of a chunk that the data
can be split into by the
:meth:`~cwinpy.data.HeterodynedData.bayesian_blocks` function. By
default this is ``inf``, i.e., chunks can be as long as possible.
remove_outliers: bool, False
If ``True`` outliers will be found (using
:meth:`~cwinpy.data.HeterodynedData.find_outliers`) and removed from the
data. They will not be stored anywhere in the class.
thresh: float, 3.5
The modified z-score threshold for outlier removal (see
:meth:`~cwinpy.data.HeterodynedData.find_outliers`)
comments: str
A string containing any comments about the data.
ephemearth: str, None
The path to the Earth ephemeris used for the signal phase model.
ephemsun: str, None
The path to the Sun ephemeris used for the signal phase model.
"""
# set some default detector color maps for plotting
colmapdic = {"H1": "Reds", "L1": "Blues", "V1": "PuRd", "G1": "Greys"}
# set some default plotting values
PLOTTING_DEFAULTS = {
"labelsize": 14, # font size for axes tick labels
"fontsize": 16, # font size for axes labels
"fontname": "Gentium", # font name for axes labels
"labelname": "Carlito", # font names for axes tick labels
}
_metadata_slots = Series._metadata_slots + (
"dt",
"comments",
"par",
"injpar",
"window",
"laldetector",
"vars",
"bbthreshold",
"bbminlength",
"bbmaxlength",
"outlier_thresh",
"injtimes",
"freq_factor",
"filter_history",
"running_median",
"inj_data",
"input_stds",
"outlier_mask",
"include_ssb",
"include_bsb",
"include_glitch",
"include_fitwaves",
"cwinpy_version",
)
def __new__(
cls,
data=None,
times=None,
par=None,
detector=None,
window=30,
inject=False,
injpar=None,
injtimes=None,
freqfactor=2.0,
fakeasd=None,
fakeseed=None,
issigma=False,
bbthreshold="default",
bbminlength=5,
bbmaxlength=np.inf,
remove_outliers=False,
thresh=3.5,
comments="",
ephemearth=None,
ephemsun=None,
**kwargs,
):
stds = None # initialise standard deviations
# read/parse data
if isinstance(data, str):
try:
new = cls.read(data)
except Exception as e:
raise IOError("Error reading file '{}':\n{}".format(data, e))
if new.detector is None:
new.detector = detector
else:
if isinstance(data, (TimeSeriesBase, HeterodynedData)):
dataarray = data.value
hettimes = data.times
if detector is None:
detector = data.detector
if type(data) is HeterodynedData:
if data.stds is not None:
stds = data.stds
else:
# use data
hettimes = times
if hettimes is None and data is None:
raise ValueError("Time stamps and/or data must be supplied")
elif data is not None:
dataarray = np.atleast_2d(np.asarray(data))
if dataarray.shape[0] == 1:
dataarray = dataarray.T
else:
# set data to zeros
                dataarray = np.zeros((len(hettimes), 1), dtype=complex)
if (
dataarray.shape[1] == 1
                and dataarray.dtype == complex
and hettimes is not None
):
dataarray = dataarray.flatten()
elif dataarray.shape[1] == 2 and hettimes is not None:
# real and imaginary components are separate
dataarray = dataarray[:, 0] + 1j * dataarray[:, 1]
elif dataarray.shape[1] == 3:
if hettimes is None:
# first column of array should be times
hettimes = dataarray[:, 0]
dataarray = dataarray[:, 1] + 1j * dataarray[:, 2]
else:
# third column can be standard deviations
stds = dataarray[:, 2]
dataarray = dataarray[:, 0] + 1j * dataarray[:, 1]
elif dataarray.shape[1] == 4:
if hettimes is None:
# first column of array should be times
hettimes = dataarray[:, 0]
stds = dataarray[:, 3]
dataarray = dataarray[:, 1] + 1j * dataarray[:, 2]
else:
raise ValueError("Supplied data array is the wrong shape")
else:
raise ValueError("Supplied data array is the wrong shape")
if len(hettimes) != dataarray.shape[0]:
raise ValueError("Supplied times is not that same length as the data")
if hettimes is not None and times is not None:
if not | np.array_equal(hettimes, times) | numpy.array_equal |
from __future__ import print_function, division, absolute_import
import pickle
from copy import copy
import numpy as np
import pytest
from crick import SummaryStats
normal = | np.random.normal(50, scale=100, size=10000) | numpy.random.normal |
import torch
import torchvision.models as models
from torchvision import transforms as trn
import torch.nn as nn
import numpy as np
from PIL import Image
import os
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings('ignore', 'Possibly corrupt EXIF data*')
parser = argparse.ArgumentParser(description = "Intra-scale feature extraction")
parser.add_argument("-batch_size_base", "--batch_size_base", type=int, help="Number of images processed at one time", default=32)
parser.add_argument("-datasets", "--datasets", nargs='+',help="Specify the dataset used for evaluation", default=['SUN397','Places'])
parser.add_argument("-gpu", "--gpu", type=int, help="1 for gpu and -1 for cpu", default=1)
parser.add_argument("-arches", "--arches", nargs='+',help="Architecture of the CNN feature extractor", default=['alexnet'])
parser.add_argument("-scales", "--scales", nargs='+',help="The total scales(up to 3), in which the features are extracted. ", default=['1','2','3'])
parser.add_argument("-thresholds", "--thresholds", nargs='+',help="The threshold used to select the number of discriminative patches", default=['100','150'])
parser.add_argument("-resolution", "--resolution", help="specify the mode of input image resolution ('ori_res' or 'low_res') ", default="ori_res")
parser.add_argument("-selection_types", "--selection_types", nargs='+',help="The type of method (adi_red, dense or random) used for patch selection ", default=['adi_red'])
parser.add_argument("-pretrain_databases", "--pretrain_databases",nargs='+', help="Specify the pre-training data (Places(PL) or ImageNet(IN)) of the pre-trained CNN feature extractor", default=['PL','PL','IN'])
args = parser.parse_args()
batch_size_base=args.batch_size_base
datasets=args.datasets
arches=args.arches
scales=args.scales
thresholds=args.thresholds
resolution=args.resolution
selection_types=args.selection_types
pretrain_databases=args.pretrain_databases
if args.gpu==1:
device = torch.device("cuda:0")
if args.gpu==-1:
device = torch.device("cpu")
def returnTF(scale,resolution):
# load the image transformer
if scale=='1':
scale_image_size=224
if scale=='2':
scale_image_size=448
if scale=='3':
scale_image_size=896
if resolution=='ori_res' :
tf = trn.Compose([
trn.Resize((scale_image_size,scale_image_size)),
trn.ToTensor (),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
if resolution=='low_res' :
if scale=='1':
tf = trn.Compose([
trn.Resize((224,224)),
trn.ToTensor (),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
if scale=='2' or scale=='3':
tf = trn.Compose([
trn.Resize((224,224)),
trn.Resize((scale_image_size,scale_image_size)),
trn.ToTensor (),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
return tf
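# Note on the 'low_res' mode above: for scales 2 and 3 the image is first
# resized down to 224x224 and then back up to the scale's input size, so the
# network sees an upsampled low-resolution image rather than the
# original-resolution crop used in 'ori_res' mode.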
def load_model(arch,pretrain_database):
if pretrain_database=='PL':
model_file = '%s_places365.pth.tar' % arch
if not os.access(model_file, os.W_OK):
os.system('wget http://places2.csail.mit.edu/models_places365/' + model_file)
model = models.__dict__[arch](num_classes=365)
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
elif pretrain_database=='IN':
if arch=='resnet50':
model=models.resnet50(pretrained=True)
elif arch=='resnet18':
model=models.resnet18(pretrained=True)
elif arch=='alexnet':
model=models.alexnet(pretrained=True)
if arch=='resnet50' or arch=='resnet18':
feature_extractor = nn.Sequential(*list(model.children())[:-1])
if arch=='alexnet':
new_classifier = nn.Sequential(*list(model.classifier.children())[:-1])
model.classifier = new_classifier
feature_extractor=model
feature_extractor.eval()
return feature_extractor
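# Illustrative use: load_model('resnet50', 'PL') returns a Places365-pretrained
# ResNet-50 truncated before its fc layer (a 2048-d global feature after
# flattening, matching fea_dim below), while load_model('alexnet', 'IN') keeps
# the classifier up to its 4096-d penultimate layer.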
modes=['train','test']
for dataset in datasets:
image_path='./datasets/images/'+dataset+'/'
label_path='./datasets/labels/'+dataset+'/'
result_path = './results/intermediate/'+dataset+'/'
if dataset=='SUN397':
splits=['01','02','03','04','05','06','07','08','09','10']#10 fixed train/test splits for evaluation on SUN397
with open(label_path+'image_list_all.txt', 'r') as f:
image_list = f.readlines()
for arch in arches:
for selection_type in selection_types:
if selection_type=='adi_red':
for scale in scales:
if arch == 'alexnet':
fea_dim=4096
if scale=='1':
batch_size_ori = 16*batch_size_base
if scale=='2' or scale=='3':
batch_size_ori = 2*batch_size_base
if arch == 'resnet18':
fea_dim=512
if scale=='1':
batch_size_ori = 4*batch_size_base
if scale=='2' or scale=='3':
batch_size_ori = batch_size_base
if arch == 'resnet50':
fea_dim=2048
if scale=='1':
batch_size_ori = 4*batch_size_base
if scale=='2' or scale=='3':
batch_size_ori = batch_size_base
feature_extractor = load_model(arch,pretrain_databases[int(scale)-1])
feature_extractor=feature_extractor.to(device)
for mode in modes:
if dataset=='Places':
with open(label_path+'image_list_'+mode+'.txt', 'r') as f:
image_list = f.readlines()
num_images = len(image_list)
if scale=='2' or scale =='3':
local_maxima= | np.load(result_path+'local_max_'+scale+'_'+mode+'.npy') | numpy.load |
import numpy as np
import tensorflow as tf
import datetime
import time
from model import Model, get_err_threhold, performances
from tensorflow.python.platform import flags
from data import DataSet
FLAGS = flags.FLAGS
flags.DEFINE_integer('train_iterations', 90000, 'number of training iterations.')
# Training options
flags.DEFINE_integer('batch_size', 8, '')
flags.DEFINE_float('lr', 0.001, 'the base learning rate')
flags.DEFINE_integer('lr_decay_itr', 0, 'number of iteration that the lr decays')
flags.DEFINE_float('l2_alpha', 0.00001, 'param of the l2_norm loss')
flags.DEFINE_float('l1_alpha', 0.001, 'param of the l1_norm loss')
flags.DEFINE_float('dropout', 0.1, 'dropout rate')
flags.DEFINE_float('loss_alpha1', 1, 'weight of the first loss term')
flags.DEFINE_float('loss_alpha2', 1, 'weight of the second loss term')
flags.DEFINE_float('score_alpha', 0.5, 'weight for combining the two scores')
flags.DEFINE_bool('attention', True, 'whether to use the attention module')
flags.DEFINE_bool('leaky_relu', True, 'whether to use leaky ReLU activations')
flags.DEFINE_bool('CDC', True, 'whether to use central difference convolution')
flags.DEFINE_string('network', 'DTN', 'network name')
flags.DEFINE_integer('base_num_filters', 8, 'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')
flags.DEFINE_bool('last_relu', True, 'whether to apply a ReLU after the last layer')
flags.DEFINE_bool('last_bn', True, 'whether to apply batch norm after the last layer')
# flags.DEFINE_bool('clahe', True, 'whether to use bias in the attention operation')
flags.DEFINE_string('loss', 'L2', 'L2 or Con')
flags.DEFINE_bool('bn_nn', False, '')
flags.DEFINE_integer('num_gpus', 1, '')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', 'logs/miniimagenet1shot/', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', 300, 'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('net', False, 'whether to use the data saved on the network disk, or the data saved on the local disk.')
flags.DEFINE_integer('protocol', 3, '')
def train(model, saver, sess, exp_string, dataset, resume_itr=0):
SUMMARY_INTERVAL = 100
PRINT_INTERVAL = 10
TEST_PRINT_INTERVAL = 100
min_ACER_itr = 0
min_ACER = 1
print('Done initializing, starting training.')
print(exp_string)
losses_map, losses_binary = [], []
for itr in range(resume_itr, FLAGS.train_iterations):
        # adjust the learning rate
if FLAGS.lr_decay_itr > 0:
lr = FLAGS.lr * 0.5 ** int(itr / FLAGS.lr_decay_itr)
if int(itr % FLAGS.lr_decay_itr) < 2:
                print('change the lr to: ' + str(lr) + ' ----------------------------')
else:
lr = FLAGS.lr
feed_dict = {model.lr: lr}
feed_dict_data = {}
if itr == resume_itr:
image_labels = dataset.get_train_data(FLAGS.batch_size)
[files, labels] = zip(*image_labels)
feed_dict_data[dataset.image_lists] = files
sess.run(dataset.iterator, feed_dict=feed_dict_data)
faces, depthes, IRs = sess.run(dataset.out_images)
lbls = | np.array(labels) | numpy.array |
from database import Database, LoadDatabase
from numba import njit, vectorize
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
import bz2
import os
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
CENTER = 1200
RATEDBOUND = np.inf
def prepare_data(db):
CALCS_FILE = "calcs.pickle.bz2"
# if calculated and save before, load it from file
if os.path.exists(CALCS_FILE):
with bz2.BZ2File(CALCS_FILE, "r") as infile:
print("Starting loading calcs file ...")
ret = pickle.load(infile)
print("File read.")
else:
print("Starting calcs ...")
# load database
db = LoadDatabase()
# collect all handles in all standings
all_handles = set()
for standings in db.standings.values():
for handle in standings.index:
all_handles.add(handle)
        # create two-way mappings (id, handle)
handle_to_id = {handle: i for i, handle in enumerate(all_handles)}
id_to_handle = {i: handle for handle, i in handle_to_id.items()}
# sort standings by startTime
sorted_standings = [(k, v) for k, v in sorted(db.standings.items(), key=lambda x: db.contests.loc[x[0]].startTime)]
# merge handles, ranks and standings length into flat array
handle_ids_merged = []
ranks_merged = []
standings_lengths_merged = []
for c_id, standings in sorted_standings:
standings = standings.sort_values("rank")
for handle in standings.index:
handle_ids_merged.append(handle_to_id[handle])
ranks_merged.append(standings["rank"][handle])
standings_lengths_merged.append(len(standings))
# convert them to numpy array
handle_ids = np.array(handle_ids_merged, dtype=np.int32)
ranks = np.array(ranks_merged, dtype=np.int32)
standings_lens = np.array(standings_lengths_merged, dtype=np.int32)
user_contest_cnt = np.bincount(handle_ids)
with bz2.BZ2File(CALCS_FILE, "w") as outfile:
ret = (handle_to_id, id_to_handle, sorted_standings, handle_ids, ranks, standings_lens, user_contest_cnt)
pickle.dump(ret, outfile)
print("Calcs ended.")
return ret
def get_first_K_contests(K, handle_ids, ranks, standings_lens, user_contest_cnt):
if K == -1:
return handle_ids, ranks, standings_lens, user_contest_cnt
K_standings_len = np.sum(standings_lens[:K])
K_handle_ids = handle_ids[:K_standings_len]
K_ranks = ranks[:K_standings_len]
K_standings_lens = standings_lens[:K]
K_user_contest_cnt = np.bincount(K_handle_ids)
return K_handle_ids, K_ranks, K_standings_lens, K_user_contest_cnt
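# Illustrative: with standings_lens == [3, 2, 4] and K == 2, the first
# 3 + 2 == 5 entries of the flat handle/rank arrays are kept, and the per-user
# contest counts are recomputed from that slice alone.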
# Additional return value of AtCoderRatingSystem, which has all calculations, meaningful variables (pretty specific stuff)
class Result:
def __init__(self, consider, handle_to_id, id_to_handle, sorted_standings, handle_ids, ranks, standings_lens,
user_contest_cnt, nums, dens, aperfs, perfs, ratings, offsets, local_offsets, current_ranks,
Is, errors):
self.consider = consider
self.handle_to_id = handle_to_id
self.id_to_handle = id_to_handle
self.sorted_standings = sorted_standings
self.handle_ids = handle_ids
self.ranks = ranks
self.standings_lens = standings_lens
self.user_contest_cnt = user_contest_cnt
self.nums = nums
self.dens = dens
self.aperfs = aperfs
self.perfs = perfs
self.ratings = ratings
self.offsets = offsets
self.local_offsets = local_offsets
self.current_ranks = current_ranks
self.Is = Is
self.errors = errors
def get_cf_ratings(self, handle):
ratings = []
if self.consider == -1:
trimmed_standings = self.sorted_standings
else:
trimmed_standings = self.sorted_standings[:self.consider]
for contest_id, standings in trimmed_standings:
if handle in standings.index:
ratings.append(standings.loc[handle]["oldRating"])
return ratings
def get_random_user(self, threshold=10):
all_ids = np.arange(len(self.user_contest_cnt))
mask = self.user_contest_cnt >= threshold
handle_id = np.random.choice(all_ids[mask])
return self.id_to_handle[handle_id]
def plot_user(self, handle, verbose=False):
handle_id = self.handle_to_id[handle]
contest_cnt = self.user_contest_cnt[handle_id]
user_offset = self.offsets[handle_id]
print(contest_cnt, self.local_offsets[handle_id])
assert contest_cnt == self.local_offsets[handle_id]
perfs = self.perfs[user_offset:user_offset+contest_cnt]
atcoder_ratings = self.ratings[user_offset:user_offset+contest_cnt]
cf_ratings = self.get_cf_ratings(handle)
assert contest_cnt == len(cf_ratings)
print("number of contests", contest_cnt)
if verbose:
print("perfs", perfs)
print("aperf", self.aperfs[handle_id])
print("num", self.nums[handle_id])
print("den", self.dens[handle_id])
xs = np.arange(contest_cnt)
plt.figure(figsize=(15, 8))
plt.plot(xs, atcoder_ratings, label="AtCoder")
plt.plot(xs, cf_ratings, label="CodeForces")
# plt.plot(xs, perfs, label="AtCoder Perfs")
plt.title(handle)
plt.legend()
plt.show()
# - return tuple (errors, results), where
# results: Result class described above
# errors: dictionary of: error_function_name -> (dictionary of: contest id -> error calculated with that function)
# - consider only `consider` first contests, if consider == -1, all contests are taken
# - `err_fun` parameter is one function or list of functions to calculate error with
# actual, main function
def AtCoderRatingSystem(db, err_fun=None,
g_base=2, g_power_div=800, binsearch_base=6, binsearch_power_div=400, decay=0.9,
consider=50, verbose=False, **kwargs):
CENTER = 1200
RATEDBOUND = np.inf
@njit(fastmath=True)
def atcoder_calculate(handle_ids, ranks, standings_lens, user_contest_cnt,
verbose=True):
user_cnt = len(user_contest_cnt)
standings_cnt = len(standings_lens)
history_cnt = len(handle_ids)
def g(x):
return np.power(g_base, x / g_power_div)
def ginv(y):
return g_power_div * np.log(y) / np.log(g_base)
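        # Sanity note: ginv is the inverse of g, so with the defaults
        # (g_base=2, g_power_div=800) ginv(g(1600.0)) ~= 1600.0; g maps a
        # performance onto the exponential scale used for averaging below.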
# AtCoder stuff
ranks = ranks.copy().astype(np.float64)
nums = np.zeros(user_cnt, dtype=np.float64)
dens = np.zeros(user_cnt, dtype=np.float64)
aperfs = np.full(user_cnt, CENTER, dtype=np.float64)
perfs = np.empty(history_cnt, dtype=np.float64)
ratings = np.zeros(history_cnt, dtype=np.float64)
offsets = | np.cumsum(user_contest_cnt) | numpy.cumsum |
import unittest
import numpy as np
import pandas as pd
from limmbo.io.input import InputData
from limmbo.io.input import MissingInput
from limmbo.io.input import DataMismatch
from limmbo.io.input import FormatError
class Input(unittest.TestCase):
def setUp(self):
self.datainput = InputData()
self.phenotypes = np.array(((1, 2), (1, 3)))
self.pheno_samples = np.array(('S1', 'S2'))
self.phenotype_ID = | np.array(('ID1', 'ID2')) | numpy.array |
# need to have a more uniform method to exchange (pack/unpack) 1D and 2D PROCESSED data with hdf5
# type of data: Data1d, MatrixWithCoordinates (not just simple numpy arrays)
import pylab as plt
import h5py
import numpy as np
import time,datetime
import os,copy,subprocess,re
import json,pickle,fabio
import multiprocessing as mp
from py4xs.slnxs import Data1d,average,filter_by_similarity,trans_mode,estimate_scaling_factor
from py4xs.utils import common_name,max_len,Schilling_p_value
from py4xs.detector_config import create_det_from_attrs
from py4xs.local import det_names,det_model,beamline_name # e.g. "_SAXS": "pil1M_image"
from py4xs.data2d import Data2d,Axes2dPlot,MatrixWithCoords,DataType
from py4xs.utils import run
from itertools import combinations
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter
from scipy.interpolate import UnivariateSpline as uspline
from scipy.integrate import simpson
def lsh5(hd, prefix='', top_only=False, silent=False, print_attrs=True):
""" list the content of a HDF5 file
hd: a handle returned by h5py.File()
prefix: use to format the output when lsh5() is called recursively
top_only: returns the names of the top-level groups
silent: suppress printouts if True
"""
if top_only:
tp_grps = list(hd.keys())
if not silent:
print(tp_grps)
return tp_grps
for k in list(hd.keys()):
print(prefix, k)
if isinstance(hd[k], h5py.Group):
if print_attrs:
print(list(hd[k].attrs.items()))
lsh5(hd[k], prefix+"=", silent=silent, print_attrs=print_attrs)
def create_linked_files(fn, fnlist):
""" create a new file to links to data in existing files in the fn_list
for now assume that all files have the same detector/qgrid configuration without checking
"""
ff = h5py.File(fn, 'w')
for s in fnlist:
fs = h5py.File(s, "r")
if len(ff.attrs)==0:
for an in fs.attrs:
ff.attrs[an] = fs.attrs[an]
ff.flush()
for ds in lsh5(fs, top_only=True, silent=True):
ff[ds] = h5py.ExternalLink(s, ds)
fs.close()
ff.close()
def integrate_mon(em, ts, ts0, exp):
""" integrate monitor counts
monitor counts are given by em with timestamps ts
ts0 is the timestamps on the exposures, with duration of exp
"""
ffe = interp1d(ts, em)
em0 = []
for t in ts0:
tt = np.concatenate(([t], ts[(ts>t) & (ts<t+exp)], [t+exp]))
ee = ffe(tt)
em0.append(simpson(ee, tt))
return np.asarray(em0)/exp
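# Illustrative use (synthetic numbers): a constant 1-count/s monitor sampled
# once per second, integrated over two 2 s exposures starting at t=0 and t=5:
#   integrate_mon(em=np.ones(11), ts=np.arange(11.), ts0=[0., 5.], exp=2.)
# returns array([1., 1.]), i.e. the average monitor rate during each exposure.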
def pack_d1(data, ret_trans=True):
""" utility function to creat a list of [intensity, error] from a Data1d object
or from a list of Data1s objects
"""
if isinstance(data, Data1d):
if ret_trans:
return np.asarray([data.data,data.err]), data.trans
else:
return np.asarray([data.data,data.err])
elif isinstance(data, list):
tvs = [d.trans for d in data]
return np.asarray([pack_d1(d, False) for d in data]),tvs
def unpack_d1(data, qgrid, label, trans_value):
""" utility function to creat a Data1d object from hdf dataset
sepatately given data[intensity and error], qgrid, label, and trans
works for a dataset that include a list of 1d data as well
transMode is set to trans_mode.external
"""
if len(data.shape)>2:
if np.isscalar(trans_value): # this should only happen when intentionally setting trans to 0
trans_value = np.zeros(len(data))
return [unpack_d1(d, qgrid, label+("f%05d" % i), t) for i,(d,t) in enumerate(zip(data,trans_value))]
else:
ret = Data1d()
ret.qgrid = qgrid
ret.data = data[0]
ret.err = data[1]
ret.label = label
ret.set_trans(trans_mode.external, trans_value) # TODO: save transMode of d1s when packing
return ret
def merge_d1s(d1s, detectors, save_merged=False, debug=False):
""" utility function to merge 1D data sets, using functions under slnxs
d1s should contain data corresponding to detectors
"""
s0 = Data1d()
s0.qgrid = d1s[0].qgrid
d_tot = np.zeros(s0.qgrid.shape)
d_max = np.zeros(s0.qgrid.shape)
d_min = np.zeros(s0.qgrid.shape)+1.e32
e_tot = np.zeros(s0.qgrid.shape)
c_tot = np.zeros(s0.qgrid.shape)
w_tot = np.zeros(s0.qgrid.shape)
label = None
comments = ""
for d1 in d1s:
# empty part of the data is nan
idx = ~np.isnan(d1.data)
# simple averaging
#d_tot[idx] += d1.data[idx]
#e_tot[idx] += d1.err[idx]
c_tot[idx] += 1
        # average using 1/sigma^2 (inverse variance) as the weight
wt = 1/d1.err[idx]**2
d_tot[idx] += wt*d1.data[idx]
e_tot[idx] += d1.err[idx]**2*wt**2
w_tot[idx] += wt
idx1 = (np.ma.fix_invalid(d1.data, fill_value=-1)>d_max).data
d_max[idx1] = d1.data[idx1]
idx2 = (np.ma.fix_invalid(d1.data, fill_value=1e32)<d_min).data
d_min[idx2] = d1.data[idx2]
comments += d1.comments
if label is None:
label = d1.label
else:
label = common_name(label, d1.label)
# simple averaging
#s0.data[idx] /= c_tot[idx]
#s0.err[idx] /= np.sqrt(c_tot[idx])
# averaging by weight
s0.data = d_tot/w_tot
s0.err = np.sqrt(e_tot)/w_tot
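    # with w_i = 1/sigma_i^2 this is the standard inverse-variance combination:
    # mean = sum(w_i x_i) / sum(w_i), err = sqrt(sum(w_i^2 sigma_i^2)) / sum(w_i),
    # which reduces to 1/sqrt(sum(w_i)) when the input errors are consistent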
idx = (c_tot>1)
s0.overlaps.append({'q_overlap': s0.qgrid[idx],
'raw_data1': d_max[idx],
'raw_data2': d_min[idx]})
s0.label = label
s0.comments = comments # .replace("# ", "## ")
if save_merged:
s0.save(s0.label+".dd", debug=debug)
return s0
# copied from pipeline-test: merge, fix_angular_range, interp_d2
def merge(ds):
""" merge a list of MatrixWithCoord together
the datatype should be DataType.qphi
"""
if len(ds)==1:
return ds[0].copy()
wt = np.zeros(ds[0].shape)
avg = np.zeros(ds[0].shape)
idx = None
for d in ds:
if d.shape!=avg.shape:
raise Exception("merge: the two data sets must have the same shape: ", d.shape, avg.shape)
idx = ~ | np.isnan(d) | numpy.isnan |
import random
import torch
import datasets
from typing import Union, List, Tuple, Dict
from dataclasses import dataclass
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, BatchEncoding
from transformers import DataCollatorWithPadding
import numpy as np
from tqdm import tqdm
class PointDataset(Dataset):
def __init__(self, filename, sub_graph, max_groups, max_psglen, tokenizer, dataset_script_dir, dataset_cache_dir):
self._filename = filename
self._tokenizer = tokenizer
self.max_psglen = max_psglen
self.max_groups = max_groups
self.sub_graph = sub_graph
self.ir_dataset = datasets.load_dataset(
f'{dataset_script_dir}/json.py',
data_files=self._filename,
ignore_verifications=False,
cache_dir=dataset_cache_dir,
features=datasets.Features({
'qry': [datasets.Value('int32')],
'psg1': [[datasets.Value('int32')]],
'psg2': [[datasets.Value('int32')]],
'label': datasets.Value('int32'),
})
)['train']
self.total_len = len(self.ir_dataset)
def __len__(self):
return self.total_len
def __getitem__(self, item):
irdata = self.ir_dataset[item]
encoded_qry = irdata['qry']
passages = irdata[self.sub_graph][:16+self.max_groups]
label = irdata['label']
if len(passages) < 16+self.max_groups:
passages = [[] for i in range(16+self.max_groups)]
input_ids_2d = []
token_type_ids_2d =[]
attention_mask_2d =[]
passage_mask = []
for i in range(len(passages)):
if len(passages[i]) > 1:
encoding = self._tokenizer.encode_plus(encoded_qry, passages[i], truncation=True, max_length=self.max_psglen + 5, padding='max_length')
passage_mask.append(1)
else:
encoding = self._tokenizer.encode_plus(encoded_qry, truncation=True, max_length=self.max_psglen + 5, padding='max_length')
passage_mask.append(0)
input_ids_2d.append(encoding['input_ids'])
token_type_ids_2d.append(encoding['token_type_ids'])
attention_mask_2d.append(encoding['attention_mask'])
# return encoding
return {
"input_ids": | np.array(input_ids_2d) | numpy.array |
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data_file=path # path for the file
data=np.genfromtxt(data_file, delimiter=",", skip_header=1)
census=np.concatenate((data,new_record),axis=0)
print(census)
# --------------
#Code starts here
age=np.array(census[:,:1])
print(age)
max_age=age.max()
min_age=age.min()
age_mean=np.mean(age)
age_std=np.std(age)
# --------------
#Code starts here
race_0=[]
race_1=[]
race_2=[]
race_3=[]
race_4=[]
x=[]
for i in census:
if i[2]==0:
race_0.append(i)
elif i[2]==1:
race_1.append(i)
elif i[2]==2:
race_2.append(i)
elif i[2]==3:
race_3.append(i)
elif i[2]==4:
race_4.append(i)
race_1=np.asarray(race_1)
race_2=np.asarray(race_2)
race_3= | np.asarray(race_3) | numpy.asarray |
import numpy as np
from mpmath import *
n = 100  # depth (number of power-series terms)
Z1 = 0.1 + 0.5 * 1j  # impedances
Z2 = 0.02 + 0.13 * 1j
Z3 = 0.023 + 0.1 * 1j
Zp = -10 * 1j
Y1 = 1 / Z1  # admittances
Y2 = 1 / Z2
Y3 = 1 / Z3
Yp = 1 / Zp
P = -1  # input data
Q = -0.1
Va = 1.1
van = 0.5  # lamp data
lam = 2 * np.sqrt(2) / np.pi
In = np.sqrt(1 - van * van * (2 - lam * lam)) * 1
ang = -np.pi / 2 + np.arctan((van * np.sqrt(lam * lam - van * van)) / (1 - van * van))
Vb = np.zeros(n, dtype=complex)  # series to compute
Vc = np.zeros(n, dtype=complex)
R = np.zeros(n, dtype=complex)
X = np.zeros(n, dtype=complex)
F = np.zeros(n, dtype=complex)
L = np.zeros(n, dtype=complex)
Y = np.zeros(n, dtype=complex)
M = np.zeros(n, dtype=complex)
B = np.zeros(n, dtype=complex)
INL = np.zeros(n, dtype=complex)
Vb[0] = Va  # initialization of the series
Vc[0] = (-Va * Y1 - Vb[0] * Y3) / (-Y1 - Y3)
R[0] = 1 / conj(Vb[0])
X[0] = 1 / np.real(Vc[0])
F[0] = np.imag(Vc[0]) * X[0]
B[0] = 1 + F[0] * F[0]
L[0] = np.sqrt(B[0])
Y[0] = 1 / L[0]
M[0] = F[0] * Y[0]
INL[0] = In * 1 * (cos(ang) * Y[0] - sin(ang) * M[0]) + In * 1 * (sin(ang) * Y[0] + cos(ang) * M[0]) * 1j
sumatori1 = 0
sumatori2 = 0
from Funcions import pade4all
def sumaR(R, Vb, i):  # convolution between R and Vb
suma = 0
for k in range(i):
suma += R[k] * conj(Vb[i - k])
return suma
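# sumaR/sumaX accumulate Cauchy-product (discrete convolution) terms of the
# power series: e.g. sumaR(R, Vb, i) is the order-i coefficient of
# R(s) * conj(Vb(s*)) restricted to the already-known terms k < i, which is
# what lets each new series coefficient be solved for recursively.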
def sumaX(X, Vc, i):  # convolution between X and the real part of Vc
suma = 0
for k in range(i):
suma += X[k] * | np.real(Vc[i - k]) | numpy.real |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
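    # Example (illustrative): for space group P 1 this returns 1, since the
    # identity is its only symmetry transformation.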
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
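
# Minimal usage sketch (illustrative, not part of the generated data): look a
# space group up by number or Hermann-Mauguin symbol and expand one reflection
# over its symmetry operations.
#
#     sg = space_groups['P 21 21 21']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     assert len(hkls) == len(sg)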
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
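
# Space group 51: P m m a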
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
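
# Space group 52: P n n a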
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
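
# Space group 53: P m n a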
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
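
# Space group 54: P c c a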
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
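
# Space group 55: P b a m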
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
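
# Space group 56: P c c n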
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
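
# Space group 57: P b c m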
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
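
# Space group 58: P n n m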
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
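
# Space group 59: P m m n (origin choice 2)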
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
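
# Space group 60: P b c n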
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
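
# Space group 61: P b c a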
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
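
# Space group 62: P n m a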
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
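
# Space group 63: C m c m (C-centred: the eight operations recur below
# with the centring translation (1/2, 1/2, 0) added to each translation)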
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
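
# Space group 64: C m c a (C-centred)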
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
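
# Space group 65: C m m m (C-centred)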
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
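
# Space group 66: C c c m (C-centred)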
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
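
# Space group 67: C m m a (C-centred)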
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
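
# Space group 68: C c c a (origin choice 2, C-centred)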
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
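
# Space group 69: F m m m (face-centred: the eight operations recur with
# the centring translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) added)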
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
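
# Space group 70: F d d d (origin choice 2, face-centred)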
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
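
# Space group 71: I m m m (body-centred: the eight operations recur with
# the centring translation (1/2,1/2,1/2) added)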
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
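
# Space group 72: I b a m (body-centred)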
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
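
# Space group 73: I b c a (body-centred)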
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
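
# Space group 74: I m m a (body-centred)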
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
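
# Space group 75: P 4 (first tetragonal group in this table; four-fold
# rotation about z)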
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
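# Illustrative sketch, not part of the generated table: each operation above
# is a (rotation, trans_num, trans_den) triple acting on a fractional
# coordinate as rot*point + trans_num/trans_den (componentwise fraction).
# The helper name _apply_symop is hypothetical; N is the module's array
# package alias.
def _apply_symop(symop, point):
    # rot*point plus the componentwise fractional translation
    rot, trans_num, trans_den = symop
    shift = N.array([float(n) / d for n, d in zip(trans_num, trans_den)])
    return N.dot(rot, point) + shift
# For example, the second operation of P 4 above is the 4-fold rotation and
# sends (x, y, z) to (-y, x, z):
#   _apply_symop(transformations[1], N.array([0.1, 0.2, 0.3]))
# Space group 76: P 41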
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
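# Space group 77: P 42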
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
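# Space group 78: P 43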
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
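# Space group 79: I 4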
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
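# Space group 80: I 41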
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
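# Space group 81: P -4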
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
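# Space group 82: I -4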
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
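# Space group 83: P 4/m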
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
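# Space group 84: P 42/m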
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
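# Space group 85: P 4/n :2 (origin choice 2)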
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
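# Space group 86: P 42/n :2 (origin choice 2)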
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
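# Space group 87: I 4/m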
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
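# Space group 88: I 41/a :2 (origin choice 2)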
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
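# Space group 89: P 4 2 2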
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
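# Space group 90: P 4 21 2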
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
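# Space group 91: P 41 2 2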
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
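# Space group 92: P 41 21 2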
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
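# Illustrative sketch, not part of the generated table: the orbit of a
# fractional position under a space group is obtained by applying every
# (rotation, trans_num, trans_den) triple and reducing into the unit cell.
# _equivalent_positions is a hypothetical helper; reduction mod 1 via the %
# operator is assumed to be supported by the array package aliased as N.
def _equivalent_positions(ops, point):
    # One reduced image per symmetry operation
    result = []
    for rot, trans_num, trans_den in ops:
        shift = N.array([float(n) / d for n, d in zip(trans_num, trans_den)])
        result.append((N.dot(rot, point) + shift) % 1.0)
    return result
# For example, _equivalent_positions(transformations, N.array([0.1, 0.2, 0.3]))
# yields the eight symmetry-equivalent positions of P 41 21 2 defined above.
# Space group 93: P 42 2 2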
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
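# Space group 94: P 42 21 2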
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
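# Space group 95: P 43 2 2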
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
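# Space group 96: P 43 21 2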
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
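# Space group 97: I 4 2 2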
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
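# Space group 98: I 41 2 2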
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
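# Space group 99: P 4 m m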
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
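# Space group 100: P 4 b m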
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
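# Space group 101: P 42 c m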
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
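# Space group 102: P 42 n m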
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
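# Space group 103: P 4 c c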
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
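# Space group 104: P 4 n c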
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
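# Space group 105: P 42 m c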
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
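# Space group 106: P 42 b c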
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
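# Space group 107: I 4 m m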
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
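# Space group 108: I 4 c m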
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
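# Space group 109: I 41 m d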
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
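# Space group 110: I 41 c d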
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
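# Space group 111: P -4 2 m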
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
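# Space group 112: P -4 2 c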
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
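# Space group 113: P -4 21 m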
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
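# Space group 114: P -4 21 c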
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
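# Space group 115: P -4 m 2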
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
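# Space group 116: P -4 c 2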
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
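# Space group 117: P -4 b 2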
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
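# Space group 118: P -4 n 2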
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
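# Space group 119: I -4 m 2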
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
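# Space group 120: I -4 c 2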
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
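# Space group 121: I -4 2 m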
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
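# Space group 122: I -4 2 d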
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
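# Space group 123: P 4/m m m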
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
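# Space group 124: P 4/m c c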
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
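# Space group 125: P 4/n b m :2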
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
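# Space group 126: P 4/n n c :2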
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
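# Space group 127: P 4/m b m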
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
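# Space group 128: P 4/m n c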
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
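# Space group 129: P 4/n m m :2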
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
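# Space group 130: P 4/n c c :2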
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
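# Space group 131: P 42/m m c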
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
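# Space group 132: P 42/m c m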
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
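# Space group 133: P 42/n b c :2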
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
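# Space group 134: P 42/n n m :2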
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
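# Note: body-centred (I ...) groups such as No. 139 above list each of the
# 16 point operations twice -- once as-is and once with the centring
# translation (1/2, 1/2, 1/2) added -- giving 32 entries in total.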
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
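# Note: the generated translations are not reduced modulo the lattice --
# components such as 1 (num/den = 1/1) or 5/4 appear once the I-centring
# vector has been added (see the (3/4, 5/4, 3/4) entries in No. 141
# above). Consumers of this table presumably normalise translations into
# [0, 1) themselves.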
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
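# From No. 143 (P 3) onwards the groups are trigonal/hexagonal: the
# rotation matrices are expressed in the hexagonal basis, so a threefold
# rotation appears as the integer matrix [[0,-1,0],[1,-1,0],[0,0,1]],
# i.e. (x, y, z) -> (-y, x-y, z).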
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
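# Note: the ':H' suffix marks the hexagonal-axes setting of a rhombohedral
# group; the R-centring shows up as the two extra translation sets
# (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3) appended to the primitive operations
# in No. 146 above.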
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
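# Note: Nos. 149 (P 3 1 2) and 150 (P 3 2 1) differ only in where the
# twofold axes lie. In P 3 2 1 the matrix [[1,-1,0],[0,-1,0],[0,0,-1]]
# fixes the direction [1,0,0] (twofold along a); in P 3 1 2 the matrix
# [[0,-1,0],[-1,0,0],[0,0,-1]] fixes [1,-1,0] (twofold perpendicular
# to a).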
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
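# Note: the ':H' suffix marks the hexagonal setting of a rhombohedral (R)
# lattice. The R-centering is explicit in the block above: the six point-group
# operations are each repeated with the centering translations (0,0,0),
# (1/3,2/3,2/3) and (2/3,1/3,1/3), giving 18 operations in total.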
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
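# Note: the '-3' groups are centrosymmetric. The inversion -x,-y,-z appears
# above as the diagonal matrix diag(-1,-1,-1) with zero translation; half of
# the twelve operations are proper rotations and the other half are those
# rotations composed with this inversion.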
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
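# Note: screw axes appear in this table as rotations paired with fractional
# translations along c. In P 61 above, the 60-degree rotation carries
# trans_num/trans_den = (0,0,1)/(1,1,6), i.e. a shift of c/6, and its powers
# accumulate the translations 1/3, 1/2, 2/3 and 5/6 seen in the block.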
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
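# Note: appending '2 2' to the point group adds the perpendicular two-fold
# axes, doubling the operation count: P 6 has six entries above, P 6 2 2
# has twelve.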
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
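# Note: the generated numerators are not normalised -- the block above stores
# components such as [0,0,-1]/[1,1,2], i.e. a translation of -1/2 along c.
# A consumer that wants translations folded into the unit cell could reduce
# them modulo 1 (a hypothetical helper, assuming `N` arrays support `%`):
def _reduced_translation(num, den):
    """Translation vector as floats, folded into the half-open interval [0, 1)."""
    return ((1.0 * num) / den) % 1.0
# e.g. _reduced_translation(N.array([0,0,-1]), N.array([1,1,2])) -> [0., 0., 0.5]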
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
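# Space group 195 (P 2 3) opens the cubic system: permutation matrices such as
# [[0,0,1],[1,0,0],[0,1,0]] send (x,y,z) to (z,x,y), a 3-fold rotation about
# the [111] body diagonal.  A sanity check one could run while
# `transformations` still holds this group's operations -- proper rotations
# should all have determinant +1 (hand-rolled here so the sketch does not
# assume any particular linear-algebra submodule):
def _det3(m):
    """Determinant of a 3x3 array, expanded along the first row."""
    return (m[0,0]*(m[1,1]*m[2,2] - m[1,2]*m[2,1])
          - m[0,1]*(m[1,0]*m[2,2] - m[1,2]*m[2,0])
          + m[0,2]*(m[1,0]*m[2,1] - m[1,1]*m[2,0]))
# e.g. [_det3(r) for r, n, d in transformations] -> [1, 1, ..., 1]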
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
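# F 2 3 (196): the 48 tuples above are the 12 point operations of P 2 3
# repeated four times, once per face-centring translation -- (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) -- exactly as the trans_num /
# trans_den pairs in the preceding block show.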
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
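# I 2 3 (197): the same 12 point operations, duplicated once with the
# body-centring translation (1/2,1/2,1/2), giving the 24 tuples above.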
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
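# P 21 3 (198): the rotation parts match P 2 3, but the 2-fold axes are now
# 21 screw axes, visible as the attached half translations such as
# [1,0,1]/[2,1,2] = (1/2, 0, 1/2).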
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
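# I 21 3 (199): the first twelve tuples plus their body-centred copies; the
# sums are stored unreduced, e.g. [1,1,1]/[2,1,2] = (1/2, 1, 1/2), which is
# (1/2, 0, 1/2) modulo a lattice translation.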
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
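# P m -3 (200) is centrosymmetric: the second twelve rotation parts above are
# the elementwise negatives of the first twelve (each proper rotation composed
# with the pure inversion), all with zero translation.  In general, composing
# two stored operations follows the Seitz rule
# (R1|t1)(R2|t2) = (R1 R2 | R1 t2 + t1); a hypothetical sketch with float
# translations:
def _compose(op1, op2):
    """Seitz product of two (rot, num, den) tuples, returning (rot, trans)."""
    r1, n1, d1 = op1
    r2, n2, d2 = op2
    t1 = (1.0 * n1) / d1
    t2 = (1.0 * n2) / d2
    return N.dot(r1, r2), N.dot(r1, t2) + t1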
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
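# P n -3 (201) is tabulated here in origin choice 2 (the ':2' suffix), i.e.
# with the origin on an inversion centre: the pure inversion above carries no
# translation, while the 2-fold rotations pick up the n-glide half
# translations such as (1/2, 1/2, 0).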
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
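# Illustrative sketch, not part of the generated table: each
# (rot, trans_num, trans_den) triple encodes a symmetry operation acting on
# fractional coordinates as x' = rot @ x + trans_num/trans_den. The helper
# below (the name _apply_symmetry_op is hypothetical) shows the assumed usage.
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # rotate first, then add the fractional translation; the float cast
    # guards against integer division of the translation arrays
    return N.dot(rot, point) + trans_num.astype(float) / trans_den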
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
"""
cam_predict.py
Functions for testing CAM predictions
@author <NAME>
@date 19 April 2019
"""
import numpy as np
from random import random
from collections import defaultdict
def gene_differential(E,syn,neigh):
"""
    Returns a list of gene indices that have higher expression in the
    synaptic partners than in the (nonsynaptic) neighbors
Parameters:
-----------
E : numpy array
Expression matrix
syn : list
list of synaptic partners at each synapse
neigh : list
list of (nonsynaptic) neighbors at each synapse
    Note: syn[i] and neigh[i] correspond to the ith synapse
"""
(n,m) = E.shape
k = len(syn)
syn_count = np.zeros(m)
neigh_count = np.zeros(m)
for i in range(k):
sdx = syn[i]
ndx = neigh[i]
ssum = np.sum(E[sdx,:],axis=0)
ssum[ssum > 0] = 1
syn_count += ssum
nsum = np.sum(E[ndx,:],axis=0)
nsum[nsum > 0] = 1
neigh_count += nsum
    # genes expressed at more synapses (in partners) than in neighbors
    diff = syn_count - neigh_count
return np.where(diff > 0)[0].tolist()
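def _demo_gene_differential():
    """Illustrative sketch with toy data (an assumption, not part of the
    original analysis): three cells, three genes; cell 0 is the synaptic
    partner at the single synapse, cells 1 and 2 are nonsynaptic neighbors."""
    E_toy = np.array([[1, 1, 0],
                      [0, 1, 0],
                      [0, 0, 1]])
    # gene 0 is expressed in the partner but in no neighbor, so it is returned
    return gene_differential(E_toy, [[0]], [[1, 2]])  # expected: [0]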
def gene_cell_profile(E,syn,neigh):
"""
Returns dictionary of cam profiles for postsynaptic partners
Dict format = {cell:cam_feature_vector}
Parameters:
-----------
E : numpy array
Expression matrix
syn : list
list of synaptic partners at each synapse
neigh : list
list of (nonsynaptic) neighbors at each synapse
Note: syn[i] and neigh[i] correpspond to the ith synapse
"""
(n,m) = E.shape
k = len(syn)
profile = defaultdict(lambda:np.zeros(m))
syn_count = defaultdict(int)
for i in range(k):
ssum = np.sum(E[syn[i],:],axis=0)
ssum[ssum > 0] = 1
nsum = np.sum(E[neigh[i],:],axis=0)
nsum[nsum > 0] = 1
diff = ssum - nsum
diff[diff < 1] = 0
for j in syn[i]:
profile[j] += diff
syn_count[j] += 1
for j in profile: profile[j] /= syn_count[j]
return profile
def gene_mean_profile(E,syn,neigh):
"""
    Returns the mean CAM profile (a single gene feature vector) averaged
    over all synapses that have at least one synaptic partner
Parameters:
-----------
E : numpy array
Expression matrix
syn : list
list of synaptic partners at each synapse
neigh : list
list of (nonsynaptic) neighbors at each synapse
    Note: syn[i] and neigh[i] correspond to the ith synapse
"""
(n,m) = E.shape
k = len(syn)
profile = np.zeros(m)
syn_count = 0
for i in range(k):
ssum = np.sum(E[syn[i],:],axis=0)
ssum[ssum > 0] = 1
nsum = np.sum(E[neigh[i],:],axis=0)
nsum[nsum > 0] = 1
diff = ssum - nsum
diff[diff < 1] = 0
if syn[i]:
profile += diff
syn_count += 1
profile /= syn_count
return profile
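def _demo_gene_mean_profile():
    """Illustrative sketch with toy data (assumed): unlike gene_cell_profile,
    the differential genes are pooled into one mean vector over all synapses
    rather than kept per postsynaptic cell."""
    E_toy = np.array([[1, 1, 0],
                      [0, 1, 0],
                      [0, 0, 1]])
    return gene_mean_profile(E_toy, [[0]], [[1, 2]])  # expected: [1., 0., 0.]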
def get_synapse_data(S,e,cpartners=set([]),screen=None,remove_partners = False):
"""
    Formats the synapse data. Returns lists of synaptic partners (syn),
    nonsynaptic neighbors (neigh), and neighbors that are synaptic partners
    elsewhere (cneigh), with cell names converted to cell indices
    Parameters:
    -----------
    S : dictionary
        Dictionary of synapse data
    e : expression matrix object
    cpartners : set (optional)
        Set of synaptic partners to remove from all neighbors
    screen : string (optional)
        Screen synapses by data set, based on the image name. Suggest using
        'N2U' for adult synapses and 'JSH' for L4 synapses.
    remove_partners : bool (optional)
        If True, also remove the cells in cpartners from the nonsynaptic neighbors
"""
syn,neigh,cneigh = [],[],[]
for cont in S:
if screen and screen not in S[cont]['sections'][0]: continue
partners = set(S[cont]['partners'])
neighbors = set(S[cont]['neighbors'])
nonsyn = neighbors - partners
if remove_partners:
nonsyn = nonsyn - cpartners
_cneigh = neighbors & cpartners
syn.append([e.cells[n] for n in partners if n in e.cells])
neigh.append([e.cells[n] for n in nonsyn if n in e.cells])
cneigh.append([e.cells[n] for n in _cneigh if n in e.cells])
return syn,neigh,cneigh
def score_overlap(sig,test):
"""
Scores the overlap between the gene signature and the test signature
Parameters:
-----------
sig: set
Gene signature
test: set
Test signature
"""
num = len(sig & test)
den = float(len(sig))
return num / den
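# For example, score_overlap({1, 2, 3}, {2, 3, 4}) == 2/3: two of the three
# signature genes appear in the test signature.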
def get_overlap(sig,E,syn,neigh):
"""
    Returns the overlap between the computed gene signature and the
gene expression of synaptic and (nonsynaptic) partners.
    Parameters:
----------
sig : set
Gene signature
E : numpy array
Expression matrix
syn : list
List of synaptic partners at each synapse
neigh : list
List of neighbors at each synapse
Return:
-------
ssig : list
List of overlap scores for each synaptic partner at each synapse
    nsig : list
        List of overlap scores for each neighbor at each synapse
idsyn : float
Fraction of synapses where the highest overlap score is a synaptic partner
"""
k = len(syn)
den = float(len(sig))
sig = set(sig)
ssig,nsig = [],[]
idsyn = 0
for i in range(k):
synscore = [0]
neighscore = [0]
for j in syn[i]:
_ssig = set(np.where(E[j,:] > 0)[0].tolist())
score = score_overlap(sig,_ssig)
synscore.append(score)
ssig.append(score)
for j in neigh[i]:
_nsig = set(np.where(E[j,:]> 0)[0].tolist())
score = score_overlap(sig,_nsig)
neighscore.append(score)
nsig.append(score)
if max(synscore) > max(neighscore): idsyn += 1
return ssig,nsig,idsyn/float(k)
def get_overlap_spatial_loc(sig,E,syn,neigh,cneigh):
"""
    Returns the overlap between the computed gene signature and the
gene expression of synaptic and (nonsynaptic) partners.
    Parameters:
----------
sig : set
Gene signature
E : numpy array
Expression matrix
syn : list
List of synaptic partners at each synapse
neigh : list
List of neighbors at each synapse
    cneigh : list
List of neighbors that are synaptic partners elsewhere
Return:
-------
ssig : list
List of overlap scores for each synaptic partner at each synapse
    nsig : list
        List of overlap scores for each neighbor at each synapse
idsyn : float
Fraction of synapses where the highest overlap score is a synaptic partner
"""
k = len(syn)
den = float(len(sig))
sig = set(sig)
ssig,nsig = [],[]
idsyn = 0
for i in range(k):
synscore = [0]
neighscore = [0]
for j in syn[i]:
_ssig = set(np.where(E[j,:] > 0)[0].tolist())
score = score_overlap(sig,_ssig)
synscore.append(score)
ssig.append(score)
for j in neigh[i]:
            _nsig = set(np.where(E[j,:]> 0)[0].tolist())
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 20 12:16:29 2021
@author: WANGH0M
"""
import numpy as np
from scipy import sparse
from constraints_basic import columnnew,\
con_edge,con_unit,con_constl,con_equal_length,\
con_constangle2,con_constangle,con_unit_vector,con_dependent_vector,\
con_planarity,con_osculating_tangent,con_diagonal,\
con_equal_opposite_angle,\
con_dot,con_cross_product2,con_bisecting_vector,\
con_normal_constraints, con_planarity_constraints,\
con_unit_tangentplane_normal
# -------------------------------------------------------------------------
# common used net-constraints:
# -------------------------------------------------------------------------
#--------------------------------------------------------------------------
# isogonals:
#--------------------------------------------------------------------------
def con_unit_edge(rregular=False,**kwargs):
""" unit_edge / unit_diag_edge
X += [l1,l2,l3,l4,ue1,ue2,ue3,ue4]
(vi-v) = li*ui, ui**2=1, (i=1,2,3,4)
"""
if kwargs.get('unit_diag_edge'):
w = kwargs.get('unit_diag_edge')
diag=True
elif kwargs.get('unit_edge'):
w = kwargs.get('unit_edge')
diag=False
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
V = mesh.V
if diag:
v,v1,v2,v3,v4 = mesh.rr_star_corner
elif rregular:
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
else:
#v,v1,v2,v3,v4 = mesh.ver_regular_star.T # default angle=90, non-orient
v,v1,v2,v3,v4 = mesh.ver_star_matrix.T # oriented
num = len(v)
c_v = columnnew(v,0,V)
c_v1 = columnnew(v1,0,V)
c_v2 = columnnew(v2,0,V)
c_v3 = columnnew(v3,0,V)
c_v4 = columnnew(v4,0,V)
arr = np.arange(num)
c_l1 = N5-16*num + arr
c_l2 = c_l1 + num
c_l3 = c_l2 + num
c_l4 = c_l3 + num
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_v1,c_v,c_l1,c_ue1,num,N)
H2,r2 = con_edge(X,c_v2,c_v,c_l2,c_ue2,num,N)
H3,r3 = con_edge(X,c_v3,c_v,c_l3,c_ue3,num,N)
H4,r4 = con_edge(X,c_v4,c_v,c_l4,c_ue4,num,N)
Hu1,ru1 = con_unit(X,c_ue1,num,N)
Hu2,ru2 = con_unit(X,c_ue2,num,N)
Hu3,ru3 = con_unit(X,c_ue3,num,N)
Hu4,ru4 = con_unit(X,c_ue4,num,N)
H = sparse.vstack((H1,H2,H3,H4,Hu1,Hu2,Hu3,Hu4))
r = np.r_[r1,r2,r3,r4,ru1,ru2,ru3,ru4]
return H*w,r*w
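# Hedged sketch (hypothetical helper, not part of the original API): how one
# quadratic constraint u**2 = 1 is linearized into a single sparse row of
# H*X = r for a Gauss-Newton step -- the same convention the imported
# con_unit uses for many vectors at once. Layout assumption: c_u holds the
# three indices of one unit vector inside the global variable vector X0.
def _sketch_unit_row(X0, c_u, N):
    u0 = X0[c_u]
    # linearize f(u) = u.u - 1 at u0:  2*u0.u = u0.u0 + 1
    H = sparse.coo_matrix((2*u0, (np.zeros(3, dtype=int), c_u)), shape=(1, N))
    r = np.array([u0 @ u0 + 1.0])
    return H, r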
def con_orthogonal(diagmesh=False,**kwargs): # simplest one, for auxetic-cmc-case
    """(v1-v3)*(v2-v4)=0, no auxiliary variables
"""
if kwargs.get('orthogonal'):
w = kwargs.get('orthogonal')
elif kwargs.get('orthogonal_diag'):
w = kwargs.get('orthogonal_diag')
diagmesh=True
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
V = mesh.V
if diagmesh:
"(v1-v3)*(v2-v4)=0"
v,v1,v2,v3,v4 = mesh.rr_star_corner
else:
v0, vj, l = mesh.vertex_ring_vertices_iterators(order=True,
return_lengths=True)
ind = np.in1d(v0, np.where(l == 4)[0])
v0 = v0[ind]
vj = vj[ind]
v = v0[::4]
v1,v2,v3,v4 = vj[::4],vj[1::4],vj[2::4],vj[3::4]
c_v1 = columnnew(v1,0,V)
c_v2 = columnnew(v2,0,V)
c_v3 = columnnew(v3,0,V)
c_v4 = columnnew(v4,0,V)
col = np.r_[c_v1,c_v2,c_v3,c_v4]
num = len(v)
row = np.tile(np.arange(num),12)
d1 = X[c_v2]-X[c_v4]
d2 = X[c_v1]-X[c_v3]
d3 = X[c_v4]-X[c_v2]
d4 = X[c_v3]-X[c_v1]
data = np.r_[d1,d2,d3,d4]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.einsum('ij,ij->i',d1.reshape(-1,3, order='F'),d2.reshape(-1,3, order='F'))
#self.add_iterative_constraint(H*w, r*w, name)
return H*w,r*w
def con_orthogonal_midline(**kwargs):
""" this method is almost the same as above, minor differences at boundary
control quadfaces: two middle line are orthogonal to each other
quadface: v1,v2,v3,v4
middle lins: e1 = (v1+v2)/2-(v3+v4)/2; e2 = (v2+v3)/2-(v4+v1)/2
<===> e1 * e2 = 0 <==> (v1-v3)^2=(v2-v4)^2
"""
w = kwargs.get('orthogonal')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
num = mesh.num_quadface
    v1,v2,v3,v4 = mesh.rr_quadface.T # in order
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
H,r = con_equal_length(X,c_v1,c_v2,c_v3,c_v4,num,N)
return H*w,r*w
def con_isogonal(cos0,assign=False,**kwargs):
"""
keep tangent crossing angle
X += [lt1,lt2, ut1,ut2, cos]
(ue1-ue3) = lt1 * ut1, ut1**2 = 1
(ue2-ue4) = lt2 * ut2, ut2**2 = 1
ut1 * ut2 = cos
if assign:
cos == cos0
"""
w = kwargs.get('isogonal')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
N6 = kwargs.get('N6')
num = mesh.num_regular
arr = np.arange(num)
c_l1 = N6-8*num-1 + arr
c_l2 = c_l1+num
c_ut1 = columnnew(arr,N6-6*num-1,num)
c_ut2 = columnnew(arr,N6-3*num-1,num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_ue1,c_ue3,c_l1,c_ut1,num,N)
H2,r2 = con_edge(X,c_ue2,c_ue4,c_l2,c_ut2,num,N)
Hu1,ru1 = con_unit(X,c_ut1,num,N)
Hu2,ru2 = con_unit(X,c_ut2,num,N)
Ha,ra = con_constangle2(X,c_ut1,c_ut2,N6-1,num,N)
H = sparse.vstack((H1,H2,Hu1,Hu2,Ha))
r = np.r_[r1,r2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N6-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal')
#print('err:isogonal:',np.sum(np.square(H*X-r)))
return H*w,r*w
def con_isogonal_diagnet(cos0,assign=False,**kwargs):
"""
keep tangent crossing angle, of diagnal directions
X += [lt1,lt2, ut1,ut2, cos]
(ue1-ue3) = lt1 * ut1, ut1**2 = 1
(ue2-ue4) = lt2 * ut2, ut2**2 = 1
ut1 * ut2 = cos
if assign:
cos == cos0
"""
w = kwargs.get('isogonal_diagnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
N6 = kwargs.get('N6')
num = len(mesh.ind_rr_star_v4f4)
arr = np.arange(num)
c_l1 = N6-8*num-1 + arr
c_l2 = c_l1+num
c_ut1 = columnnew(arr,N6-6*num-1,num)
c_ut2 = columnnew(arr,N6-3*num-1,num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_ue1,c_ue3,c_l1,c_ut1,num,N)
H2,r2 = con_edge(X,c_ue2,c_ue4,c_l2,c_ut2,num,N)
Hu1,ru1 = con_unit(X,c_ut1,num,N)
Hu2,ru2 = con_unit(X,c_ut2,num,N)
Ha,ra = con_constangle2(X,c_ut1,c_ut2,N6-1,num,N)
H = sparse.vstack((H1,H2,Hu1,Hu2,Ha))
r = np.r_[r1,r2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N6-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal_diagnet')
return H*w,r*w
def con_isogonal_checkerboard_based(cos0,assign=False,**kwargs):
"""
quadface: diagonal crossing angle
X += [ld1,ld2, ud1,ud2]
1. (v1-v3) = ld1*ud1, ud1**2=1
2. (v2-v4) = ld2*ud2, ud2**2=1
3. ud1*ud2 == cos0
"""
w = kwargs.get('isogonal_ck_based')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N10 = kwargs.get('N10')
V = mesh.V
num = mesh.num_quadface
numl = N10-8*num-1
numud = N10-6*num-1
arr = np.arange(num)
c_ld1 = numl+arr
c_ld2 = numl+num+arr
    v1,v2,v3,v4 = mesh.rr_quadface.T # in order
c_v1 = np.r_[v1,V+v1,2*V+v1] # [x,y,z]
c_v2 = np.r_[v2,V+v2,2*V+v2] # [x,y,z]
c_v3 = np.r_[v3,V+v3,2*V+v3] # [x,y,z]
c_v4 = np.r_[v4,V+v4,2*V+v4] # [x,y,z]
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
He1,re1 = con_edge(X,c_v1,c_v3,c_ld1,c_ud1,num,N)
He2,re2 = con_edge(X,c_v2,c_v4,c_ld2,c_ud2,num,N)
Hu1,ru1 = con_unit(X,c_ud1,num,N)
Hu2,ru2 = con_unit(X,c_ud2,num,N)
Ha,ra = con_constangle2(X,c_ud1,c_ud2,N10-1,num,N)
H = sparse.vstack((He1,He2,Hu1,Hu2,Ha*10))
r = np.r_[re1,re2,ru1,ru2,ra*10]
if assign:
H0,r0 = con_constl(np.array([N10-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal_ck_based')
return H*w,r*w
def con_isogonal_quadface_based(cos0,assign=False,halfdiag=True,**kwargs):
"""
    quadface: edge vectors between mid-edge points
X += [ld1,ld2, ud1,ud2]
1. (v2+v3-v1-v4) = 2* ld1*ud1, ud1**2=1
2. (v3+v4-v1-v2) = 2* ld2*ud2, ud2**2=1
3. ud1*ud2 == cos0
"""
w = kwargs.get('isogonal_face_based')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N10 = kwargs.get('N10')
V = mesh.V
if halfdiag:
ib,ir = mesh.vertex_check_ind
_,v1,v2,v3,v4 = mesh.rr_star.T
v1,v2,v3,v4 = v1[ib],v2[ib],v3[ib],v4[ib]
num = len(v1)
else:
num = mesh.num_quadface
        v1,v2,v3,v4 = mesh.rr_quadface.T # in order
numl = N10-8*num-1
numud = N10-6*num-1
arr = np.arange(num)
c_ld1 = numl+arr
c_ld2 = numl+num+arr
c_v1 = np.r_[v1,V+v1,2*V+v1] # [x,y,z]
c_v2 = np.r_[v2,V+v2,2*V+v2] # [x,y,z]
c_v3 = np.r_[v3,V+v3,2*V+v3] # [x,y,z]
c_v4 = np.r_[v4,V+v4,2*V+v4] # [x,y,z]
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
def _edge(c_ld1,c_ud1,dddd):
"(v2+v3-v1-v4) = 2* ld1*ud1, ud1**2=1"
ld1 = X[c_ld1]
ud1 = X[c_ud1]
row = np.tile(np.arange(3*num),6)
col = np.r_[c_v1,c_v2,c_v3,c_v4,np.tile(c_ld1,3),c_ud1]
data = np.r_[dddd,-2*ud1,-2*np.tile(ld1,3)]
r = -2*np.tile(ld1,3)*ud1
H = sparse.coo_matrix((data,(row,col)), shape=(3*num, N))
return H,r
a3 = np.ones(3*num)
d1 = np.r_[-a3,a3,a3,-a3]
d2 = np.r_[-a3,-a3,a3,a3]
He1,re1 = _edge(c_ld1,c_ud1,d1)
He2,re2 = _edge(c_ld2,c_ud2,d2)
Hu1,ru1 = con_unit(X,c_ud1,num,N)
Hu2,ru2 = con_unit(X,c_ud2,num,N)
Ha,ra = con_constangle2(X,c_ud1,c_ud2,N10-1,num,N)
H = sparse.vstack((He1,He2,Hu1,Hu2,Ha))
r = np.r_[re1,re2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N10-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal_face_based')
return H*w,r*w
def con_unequal_two_neighbouring_edges(v012,eps,**kwargs):
""" oriented edge1,edge2 l1>=l2 <==> l1^2-l2^2*(1+eps)=s^2
(v1-v)^2-(v2-v)^2*(1+eps) = s^2
"""
w = kwargs.get('nonsymmetric')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nnonsym = kwargs.get('Nnonsym')
num = len(v012[0])
c_s = Nnonsym-num+np.arange(num)
c_v = columnnew(v012[0],0, mesh.V)
c_v1 = columnnew(v012[1],0, mesh.V)
c_v2 = columnnew(v012[2],0, mesh.V)
col = np.r_[c_v,c_v1,c_v2,c_s]
row = np.tile(np.arange(num),10)
X0,X1,X2,Xs = X[c_v],X[c_v1],X[c_v2],X[c_s]
data = np.r_[-2*(X1-X0)+2*(X2-X0)*(1+eps),2*(X1-X0),-2*(X2-X0)*(1+eps),-2*Xs]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
E1,E2 = (X1-X0).reshape(-1,3,order='F'),(X2-X0).reshape(-1,3,order='F')
r = np.linalg.norm(E1,axis=1)**2-np.linalg.norm(E2,axis=1)**2*(1+eps)
r -= Xs**2
return H*w,r*w
def con_nonsquare_quadface(v012,il12,eps,**kwargs):
""" oriented edge1,edge2 l1 > l2 or l1<l2.
<==> (l1-l2)^2 = s^2 + eps
l1**2 = (v1-v0)^2; l2**2 = (v2-v0)^2
v012 := [v0,v1,v2]
il12 := [il1, il2]
"""
w = kwargs.get('nonsymmetric')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nnonsym = kwargs.get('Nnonsym')
c_v = columnnew(v012[0],0, mesh.V)
c_v1 = columnnew(v012[1],0, mesh.V)
c_v2 = columnnew(v012[2],0, mesh.V)
num = len(il12[0])
c_l1 = Nnonsym-mesh.E-num + il12[0]
c_l2 = Nnonsym-mesh.E-num + il12[1]
c_s = Nnonsym-num + np.arange(num)
Xl1,Xl2,Xs = X[c_l1],X[c_l2],X[c_s]
def _ratio():
col = np.r_[c_l1,c_l2,c_s]
row = np.tile(np.arange(num),3)
data = np.r_[2*(Xl1-Xl2),-2*(Xl1-Xl2),-2*Xs]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = (Xl1-Xl2)**2-Xs**2 + np.ones(num)*eps
return H,r
def _edge(c_l1,c_v0,c_v1):
"l1**2 = (v1-v0)^2"
col = np.r_[c_v0,c_v1,c_l1]
row = np.tile(np.arange(num),7)
data = 2*np.r_[-X[c_v1]+X[c_v0],X[c_v1]-X[c_v0],-X[c_l1]]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.linalg.norm((X[c_v1]-X[c_v0]).reshape(-1,3,order='F'),axis=1)**2
r -= X[c_l1]**2
return H,r
H1,r1 = _ratio()
H2,r2 = _edge(c_l1,c_v,c_v1)
H3,r3 = _edge(c_l2,c_v,c_v2)
H = sparse.vstack((H1, H2, H3))
r = np.r_[r1,r2,r3]
return H*w,r*w
def con_ctrlnet_symmetric_1_diagpoly(another_poly_direction=False,**kwargs):
""" ctrl-quadmesh + 1diagonal form a web:
three families of polylines satisfy symmetric condtion:
ut1,ut2 (unit tangnets of control polylines); ud1 (unit tangent of diagonal)
ut1 and ut2 symmetric to ud1
<==>
ud1 * (ut1-ut2) = 0;
(v1-v3) = l1 * ut1; (v2-v4) = l2 * ut2; (va-vc) = lac * ud1
ut1^2=1; ut2^2=1; ut1^2=1;
X = [lt1,lt2,ut1,ut2; lac,ud1] ##len=1+1+3+3+1+3
"""
w = kwargs.get('ctrlnet_symmetric_1diagpoly')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
num = len(mesh.ind_rr_star_v4f4)
arr,arr3 = np.arange(num), np.arange(3*num)
Ncds = kwargs.get('Ncds')-12*num
c_lt1,c_t1 = Ncds+arr, Ncds+2*num+arr3
c_lt2,c_t2 = c_lt1+num, c_t1+3*num
c_ld1,c_d1 = Ncds+8*num+arr,Ncds+9*num+arr3
_,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
_,va,vb,vc,vd = mesh.rr_star_corner# in diagonal direction
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
if another_poly_direction:
c_va = columnnew(vb,0,mesh.V)
c_vc = columnnew(vd,0,mesh.V)
else:
c_va = columnnew(va,0,mesh.V)
c_vc = columnnew(vc,0,mesh.V)
H1,r1 = con_edge(X,c_v1,c_v3,c_lt1,c_t1,num,N)
H2,r2 = con_edge(X,c_v2,c_v4,c_lt2,c_t2,num,N)
H3,r3 = con_edge(X,c_va,c_vc,c_ld1,c_d1,num,N)
Hu1,ru1 = con_unit(X,c_t1,num,N)
Hu2,ru2 = con_unit(X,c_t2,num,N)
Hu3,ru3 = con_unit(X,c_d1,num,N)
Hs,rs = con_planarity(X,c_t1,c_t2,c_d1,num,N)
H = sparse.vstack((H1, H2, H3, Hu1,Hu2,Hu3,Hs))
r = np.r_[r1,r2,r3,ru1,ru2,ru3,rs]
return H*w,r*w
def con_chebyshev(l0,assign=False,**kwargs):
"""
keeping all edge_length equal
(Vi-Vj)^2 = l^2
if assign:
l == l0
"""
w = kwargs.get('chebyshev')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N8 = kwargs.get('N8')
V = mesh.V
vi, vj = mesh.vertex_ring_vertices_iterators(order=True)
num = len(vi)
numl = N8-1
c_l = np.tile(numl, num)
c_vi = columnnew(vi,0,V)
c_vj = columnnew(vj,0,V)
data1 = X[c_vi]
data2 = X[c_vj]
col = np.r_[c_vi, c_vj, c_l]
data = 2*np.r_[data1-data2, data2-data1, -X[c_l]]
row = np.tile(np.arange(num),7)
r = np.einsum('ij,ij->i',(data1-data2).reshape(-1,3, order='F'),(data1-data2).reshape(-1,3, order='F')) - X[c_l]**2
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
if assign:
Hl,rl = con_constl(np.array([numl],dtype=int),np.array([l0]),1,N)
H = sparse.vstack((H, Hl))
r = np.r_[r,rl]
return H*w, r*w
#--------------------------------------------------------------------------
# A-net:
#--------------------------------------------------------------------------
def _con_anet(X,w,c_n,c_v,c_v1,c_v2,c_v3,c_v4,N):
"vn*(vi-v)=0; vn**2=1"
num = int(len(c_v)/3)
H1,r1 = con_planarity(X,c_v,c_v1,c_n,num,N)
H2,r2 = con_planarity(X,c_v,c_v2,c_n,num,N)
H3,r3 = con_planarity(X,c_v,c_v3,c_n,num,N)
H4,r4 = con_planarity(X,c_v,c_v4,c_n,num,N)
Hn,rn = con_unit(X,c_n,num,N)
H = sparse.vstack((H1,H2,H3,H4,Hn))
r = np.r_[r1,r2,r3,r4,rn]
return H*w, r*w
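# Geometric note: forcing one unit vector vn to be orthogonal to all four
# edges of a vertex star puts the star into a common tangent plane, i.e.
# both mesh polylines become discrete asymptotic curves -- the defining
# property of an A-net.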
def con_anet(rregular=False,checker_weight=1,id_checker=None,pitch=1,**kwargs): #TODO
""" based on con_unit_edge()
X += [ni]
ni * (vij - vi) = 0
"""
w = kwargs.get('Anet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nanet = kwargs.get('Nanet')
if rregular:
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
num=len(mesh.ind_rr_star_v4f4)
else:
num = mesh.num_regular
v,v1,v2,v3,v4 = mesh.ver_regular_star.T
c_n = Nanet-3*num+np.arange(3*num)
c_v = columnnew(v ,0,mesh.V)
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
if rregular and checker_weight<1:
"at red-rr-vs, smaller weight"
wr = checker_weight
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
Hb,rb = _con_anet(X,w,c_n[ib],c_v[ib],c_v1[ib],c_v2[ib],c_v3[ib],c_v4[ib],N)
Hr,rr = _con_anet(X,wr,c_n[ir],c_v[ir],c_v1[ir],c_v2[ir],c_v3[ir],c_v4[ir],N)
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _con_anet(X,w,c_n,c_v,c_v1,c_v2,c_v3,c_v4,N)
if kwargs.get('normal_bar'):
Nbar = kwargs.get('Nbar')
if pitch<0:
c_nbar = Nbar-3*num+np.arange(3*num)-1
annnbar = [c_v,c_n,c_nbar,Nbar-1]
else:
c_nbar = Nbar-3*num+np.arange(3*num)
annnbar = [c_v,c_n,c_nbar]
return H,r, annnbar
return H,r
def con_anet_diagnet(checker_weight=1,id_checker=None,
assign_crpc_ratio=1,pitch=1,**kwargs):
"based on con_unit_edge(diag=True); X += [ni]; ni * (vij - vi) = 0"
w = kwargs.get('Anet_diagnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nanet = kwargs.get('Nanet')
#c_v,c_v1,c_v2,c_v3,c_v4 = mesh.get_vs_diagonal_v(index=False)
v,v1,v2,v3,v4 = mesh.rr_star_corner
c_v = columnnew(v ,0,mesh.V)
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
num = int(len(c_v)/3)
c_n = Nanet-3*num+np.arange(3*num)
if checker_weight<1:
"at red-rr-vs, smaller weight"
wr = checker_weight
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
Hb,rb = _con_anet(X,w,c_n[ib],c_v[ib],c_v1[ib],c_v2[ib],c_v3[ib],c_v4[ib],N)
Hr,rr = _con_anet(X,wr,c_n[ir],c_v[ir],c_v1[ir],c_v2[ir],c_v3[ir],c_v4[ir],N)
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _con_anet(X,w,c_n,c_v,c_v1,c_v2,c_v3,c_v4,N)
annnbar = None
if kwargs.get('normal_bar'):
N10 = kwargs.get('N10')
Nbar = kwargs.get('Nbar')
if pitch<0:
c_nbar = Nbar-3*num+np.arange(3*num)-1
annnbar = [c_v,c_n,c_nbar,Nbar-1]
else:
c_nbar = Nbar-3*num+np.arange(3*num)
annnbar = [c_v,c_n,c_nbar]
return H*w,r*w,annnbar
if kwargs.get('CRPC'):
"""
quadface: diagonal crossing angle
        no additional variables; relates e1,e2 for a given ratio a
a family of constraints:
(1-a) e1*e2 - a-1=0 <==> e1*e2 = (1+a) / (1-a) === cos0
"""
num = mesh.num_quadface
numud = N10-6*num-1
arr = np.arange(num)
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
col = np.r_[c_ud1,c_ud2]
row = np.tile(arr,6)
data = np.r_[X[c_ud2],X[c_ud1]]
rr = np.einsum('ij,ij->i',X[c_ud1].reshape(-1,3, order='F'),X[c_ud2].reshape(-1,3, order='F'))
a = assign_crpc_ratio
rr += np.ones(num)*(1+a)/(1-a)
Hr = sparse.coo_matrix((data,(row,col)), shape=(num, N))
H = sparse.vstack((H,Hr))
r = np.r_[r,rr]
return H*w,r*w,annnbar
return H,r
#--------------------------------------------------------------------------
# S-net:
#--------------------------------------------------------------------------
def con_snet(orientrn,pitch=None,**kwargs):
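    """S-net: each vertex star (v and its four neighbors) lies on a common
    sphere a(x^2+y^2+z^2)+bx+cy+dz+e=0. Per vertex, the unknowns appended
    to X are the five squared point norms, the coefficients a..e, and
    sqrt(a) (see the c_* index arrays below); 11 auxiliaries in total.
    """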
w = kwargs.get('Snet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nsnet = kwargs.get('Nsnet')
V = mesh.V
numv = mesh.num_regular
v0,v1,v2,v3,v4 = mesh.rr_star.T
c_v0 = columnnew(v0,0,V)
c_v1 = columnnew(v1,0,V)
c_v2 = columnnew(v2,0,V)
c_v3 = columnnew(v3,0,V)
c_v4 = columnnew(v4,0,V)
arr1 = np.arange(numv)
arr3 = np.arange(3*numv)
_n1 = Nsnet-11*numv
c_squ, c_a = _n1+np.arange(5*numv),_n1+5*numv+arr1
c_b,c_c,c_d,c_e = c_a+numv,c_a+2*numv,c_a+3*numv,c_a+4*numv
c_a_sqr = c_a+5*numv
def _con_v_square(c_squ):
"[v;v1,v2,v3,v4]=[x,y,z], X[c_squ]=x^2+y^2+z^2"
row_v = np.tile(arr1,3)
row_1 = row_v+numv
row_2 = row_v+2*numv
row_3 = row_v+3*numv
row_4 = row_v+4*numv
row = np.r_[row_v,row_1,row_2,row_3,row_4,np.arange(5*numv)]
col = np.r_[c_v0,c_v1,c_v2,c_v3,c_v4,c_squ]
dv = 2*np.r_[X[c_v0]]
d1 = 2*np.r_[X[c_v1]]
d2 = 2*np.r_[X[c_v2]]
d3 = 2*np.r_[X[c_v3]]
d4 = 2*np.r_[X[c_v4]]
data = np.r_[dv,d1,d2,d3,d4,-np.ones(5*numv)]
H = sparse.coo_matrix((data,(row,col)), shape=(5*numv, N))
def xyz(c_i):
c_x = c_i[:numv]
c_y = c_i[numv:2*numv]
c_z = c_i[2*numv:]
return np.r_[X[c_x]**2+X[c_y]**2+X[c_z]**2]
r = np.r_[xyz(c_v0),xyz(c_v1),xyz(c_v2),xyz(c_v3),xyz(c_v4)]
return H,r
def _con_pos_a(c_a,c_a_sqr):
"a>=0 <---> a_sqr^2 - a = 0"
row = np.tile(arr1,2)
col = np.r_[c_a_sqr, c_a]
data = np.r_[2*X[c_a_sqr], -np.ones(numv)]
r = X[c_a_sqr]**2
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
def _con_sphere_normalization(c_a,c_b,c_c,c_d,c_e):
"""normalize the sphere equation,
convinent for computing/represent distance\normals
||df|| = b^2+c^2+d^2-4ae=1
"""
row = np.tile(arr1,5)
col = np.r_[c_a,c_b,c_c,c_d,c_e]
data = 2*np.r_[-2*X[c_e],X[c_b],X[c_c],X[c_d],-2*X[c_a]]
r = X[c_b]**2+X[c_c]**2+X[c_d]**2-4*X[c_a]*X[c_e]+np.ones(numv)
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
def _con_sphere(c_squ,c_a,c_b,c_c,c_d,c_e):
"a(x^2+y^2+z^2)+(bx+cy+dz)+e=0"
row = np.tile(arr1,9)
def __sphere(c_vi,c_sq):
c_x = c_vi[:numv]
c_y = c_vi[numv:2*numv]
c_z = c_vi[2*numv:]
col = np.r_[c_x,c_y,c_z,c_sq,c_a,c_b,c_c,c_d,c_e]
data = np.r_[X[c_b],X[c_c],X[c_d],X[c_a],X[c_sq],X[c_x],X[c_y],X[c_z],np.ones(numv)]
r = X[c_b]*X[c_x]+X[c_c]*X[c_y]+X[c_d]*X[c_z]+X[c_a]*X[c_sq]
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
H0,r0 = __sphere(c_v0,c_squ[:numv])
H1,r1 = __sphere(c_v1,c_squ[numv:2*numv])
H2,r2 = __sphere(c_v2,c_squ[2*numv:3*numv])
H3,r3 = __sphere(c_v3,c_squ[3*numv:4*numv])
H4,r4 = __sphere(c_v4,c_squ[4*numv:])
H = sparse.vstack((H0,H1,H2,H3,H4))
r = np.r_[r0,r1,r2,r3,r4]
return H,r
def _con_const_radius(c_a,c_r):
"2*ai * r = 1 == df"
c_rr = np.tile(c_r, numv)
row = np.tile(arr1,2)
col = np.r_[c_a, c_rr]
data = np.r_[X[c_rr], X[c_a]]
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
r = X[c_rr] * X[c_a] + 0.5*np.ones(numv)
return H,r
def _con_anet(c_a):
row = arr1
col = c_a
data = np.ones(numv)
r = np.zeros(numv)
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
def _con_orient(c_n,c_o):
"n0x*nx+n0y*ny+n0z*nz-x_orient^2 = 0"
row = np.tile(arr1,4)
col = np.r_[c_n, c_o]
data = np.r_[orientrn.flatten('F'), -2*X[c_o]]
r = -X[c_o]**2
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
H0,r0 = _con_v_square(c_squ)
H1,r1 = _con_pos_a(c_a,c_a_sqr)
Hn,rn = _con_sphere_normalization(c_a,c_b,c_c,c_d,c_e)
Hs,rs = _con_sphere(c_squ,c_a,c_b,c_c,c_d,c_e)
H = sparse.vstack((H0,H1,Hn,Hs))
r = np.r_[r0,r1,rn,rs]
if kwargs.get('Snet_orient'):
w1 = kwargs.get('Snet_orient')
Ns_n = kwargs.get('Ns_n')
c_n = Ns_n-4*numv+arr3
c_n_sqr = Ns_n-numv+arr1
Ho,ro = _con_orient(c_n,c_n_sqr)
H = sparse.vstack((H, Ho * w1))
r = np.r_[r, ro * w1]
if kwargs.get('Snet_constR'):
w2 = kwargs.get('Snet_constR')
Ns_r = kwargs.get('Ns_r')
c_r = np.array([Ns_r-1],dtype=int)
Hr,rr = _con_const_radius(c_a,c_r)
H = sparse.vstack((H, Hr * w2))
r = np.r_[r, rr * w2]
if kwargs.get('Snet_anet'):
w3 = kwargs.get('Snet_anet')
Ha,ra = _con_anet(c_a)
H = sparse.vstack((H, Ha * w3))
r = np.r_[r, ra * w3]
if kwargs.get('normal_bar'):
"""cen-an=n*r; (n^2=1, not necessary)
cen = -(B,C,D)/2A, r is computed from last iteration
2A*(r* nx + anx) + B = 0
2A*(r* ny + any) + C = 0
2A*(r* nz + anz) + D = 0
"""
Nbar = kwargs.get('Nbar')
annnbar = None
if pitch<0:
c_n = Nbar-6*numv+np.arange(3*numv)-1
c_nbar = Nbar-3*numv+np.arange(3*numv)-1
annnbar = [c_v0,c_n,c_nbar,Nbar-1]
else:
c_n = Nbar-6*numv+np.arange(3*numv)
c_nbar = Nbar-3*numv+np.arange(3*numv)
annnbar = [c_v0,c_n,c_nbar]
cen = -np.c_[X[c_b]/X[c_a],X[c_c]/X[c_a],X[c_d]/X[c_a]]/2
rad1 = np.linalg.norm(cen-X[c_v0].reshape(-1,3,order='F'),axis=1)
rad2 = np.linalg.norm(cen-X[c_v1].reshape(-1,3,order='F'),axis=1)
rad3 = np.linalg.norm(cen-X[c_v2].reshape(-1,3,order='F'),axis=1)
rad4 = np.linalg.norm(cen-X[c_v3].reshape(-1,3,order='F'),axis=1)
rad5 = np.linalg.norm(cen-X[c_v4].reshape(-1,3,order='F'),axis=1)
radii = (rad1+rad2+rad3+rad4+rad5)/5
def _normal(c_a,c_b,c_anx,c_nx):
row = np.tile(np.arange(numv),4)
col = np.r_[c_a,c_b,c_anx,c_nx]
one = np.ones(numv)
data = np.r_[2*(radii*X[c_nx]+X[c_anx]),one,2*X[c_a],2*radii*X[c_a]]
r = 2*X[c_a]*(radii*X[c_nx]+X[c_anx])
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
Hb,rb = _normal(c_a,c_b,c_v0[:numv],c_n[:numv])
Hc,rc = _normal(c_a,c_c,c_v0[numv:2*numv],c_n[numv:2*numv])
Hd,rd = _normal(c_a,c_d,c_v0[2*numv:],c_n[2*numv:])
Hn,rn = con_unit(X,c_n,numv,N)
H = sparse.vstack((H, Hb, Hc, Hd, Hn))
r = np.r_[r, rb, rc, rd, rn]
return H*w,r*w,annnbar
#self.add_iterative_constraint(H * w, r * w, 'Snet')
return H*w,r*w
def con_snet_diagnet(assign_crpc_ratio,pitch=None,
ck1=False,ck2=False,is_sub=True,
**kwargs):
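    """S-net on the diagonal net: each vertex and its four diagonal
    neighbors lie on a common sphere (same 11-per-vertex unknown layout
    as con_snet above)."""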
w = kwargs.get('Snet_diagnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nsnet = kwargs.get('Nsnet')
numv = len(mesh.ind_rr_star_v4f4)
if ck1:
numv = len(mesh.ind_ck_rr_vertex[0])
elif ck2:
numv = len(mesh.ind_ck_rr_vertex[1])
arrv1 = np.arange(numv)
c_v,c_cen1,c_cen2,c_cen3,c_cen4 = mesh.get_vs_diagonal_v(ck1=ck1,ck2=ck2,index=False)
c_cen = [c_cen1,c_cen2,c_cen3,c_cen4]
_n1 = Nsnet-11*numv
c_squ, c_a = _n1+np.arange(5*numv),_n1+5*numv+arrv1
c_b,c_c,c_d,c_e = c_a+numv,c_a+2*numv,c_a+3*numv,c_a+4*numv
c_a_sqr = c_a+5*numv
def _con_v_square(c_v,c_cen,c_squ):
"[v;c1,c2,c3,c4]=[x,y,z], X[c_squ]=x^2+y^2+z^2"
c_cen1,c_cen2,c_cen3,c_cen4 = c_cen
row_v = np.tile(arrv1,3)
row_1 = row_v+numv
row_2 = row_v+2*numv
row_3 = row_v+3*numv
row_4 = row_v+4*numv
row = np.r_[row_v,row_1,row_2,row_3,row_4,np.arange(5*numv)]
col = np.r_[c_v,c_cen1,c_cen2,c_cen3,c_cen4,c_squ]
dv = 2*np.r_[X[c_v]]
d1 = 2*np.r_[X[c_cen1]]
d2 = 2*np.r_[X[c_cen2]]
d3 = 2*np.r_[X[c_cen3]]
d4 = 2*np.r_[X[c_cen4]]
data = np.r_[dv,d1,d2,d3,d4,-np.ones(5*numv)]
H = sparse.coo_matrix((data,(row,col)), shape=(5*numv, N))
def xyz(c_i):
c_x = c_i[:numv]
c_y = c_i[numv:2*numv]
c_z = c_i[2*numv:]
return np.r_[X[c_x]**2+X[c_y]**2+X[c_z]**2]
r = np.r_[xyz(c_v),xyz(c_cen1),xyz(c_cen2),xyz(c_cen3),xyz(c_cen4)]
return H,r
def _con_pos_a(c_a,c_a_sqr):
"a>=0 <---> a_sqr^2 - a = 0"
row = np.tile(arrv1,2)
col = np.r_[c_a_sqr, c_a]
data = np.r_[2*X[c_a_sqr], -np.ones(numv)]
r = X[c_a_sqr]**2
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
def _con_sphere_normalization(c_a,c_b,c_c,c_d,c_e):
"""normalize the sphere equation,
        convenient for computing/representing distances and normals
||df|| = b^2+c^2+d^2-4ae=1
"""
row = np.tile(arrv1,5)
col = np.r_[c_a,c_b,c_c,c_d,c_e]
data = 2*np.r_[-2*X[c_e],X[c_b],X[c_c],X[c_d],-2*X[c_a]]
r = X[c_b]**2+X[c_c]**2+X[c_d]**2-4*X[c_a]*X[c_e]+np.ones(numv)
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
def _con_sphere(c_v,c_cen,c_squ,c_a,c_b,c_c,c_d,c_e):
"a(x^2+y^2+z^2)+(bx+cy+dz)+e=0"
c_cen1,c_cen2,c_cen3,c_cen4 = c_cen
row = np.tile(arrv1,9)
def __sphere(c_vi,c_sq):
c_x = c_vi[:numv]
c_y = c_vi[numv:2*numv]
c_z = c_vi[2*numv:]
col = np.r_[c_x,c_y,c_z,c_sq,c_a,c_b,c_c,c_d,c_e]
data = np.r_[X[c_b],X[c_c],X[c_d],X[c_a],X[c_sq],X[c_x],X[c_y],X[c_z],np.ones(numv)]
r = X[c_b]*X[c_x]+X[c_c]*X[c_y]+X[c_d]*X[c_z]+X[c_a]*X[c_sq]
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
H0,r0 = __sphere(c_v,c_squ[:numv])
H1,r1 = __sphere(c_cen1,c_squ[numv:2*numv])
H2,r2 = __sphere(c_cen2,c_squ[2*numv:3*numv])
H3,r3 = __sphere(c_cen3,c_squ[3*numv:4*numv])
H4,r4 = __sphere(c_cen4,c_squ[4*numv:])
H = sparse.vstack((H0,H1,H2,H3,H4))
r = np.r_[r0,r1,r2,r3,r4]
return H,r
H0,r0 = _con_v_square(c_v,c_cen,c_squ)
H1,r1 = _con_pos_a(c_a,c_a_sqr)
Hn,rn = _con_sphere_normalization(c_a,c_b,c_c,c_d,c_e)
Hs,rs = _con_sphere(c_v,c_cen,c_squ,c_a,c_b,c_c,c_d,c_e)
H = sparse.vstack((H0,H1,Hn,Hs))
r = np.r_[r0,r1,rn,rs]
def _con_normal(c_n):
cen = -np.c_[X[c_b]/X[c_a],X[c_c]/X[c_a],X[c_d]/X[c_a]]/2
rad1 = np.linalg.norm(cen-X[c_v].reshape(-1,3,order='F'),axis=1)
rad2 = np.linalg.norm(cen-X[c_cen1].reshape(-1,3,order='F'),axis=1)
rad3 = np.linalg.norm(cen-X[c_cen2].reshape(-1,3,order='F'),axis=1)
rad4 = np.linalg.norm(cen-X[c_cen3].reshape(-1,3,order='F'),axis=1)
rad5 = np.linalg.norm(cen-X[c_cen4].reshape(-1,3,order='F'),axis=1)
radii = (rad1+rad2+rad3+rad4+rad5)/5
def _normal(c_a,c_b,c_anx,c_nx):
row = np.tile(np.arange(numv),4)
col = np.r_[c_a,c_b,c_anx,c_nx]
one = np.ones(numv)
data = np.r_[2*(radii*X[c_nx]+X[c_anx]),one,2*X[c_a],2*radii*X[c_a]]
r = 2*X[c_a]*(radii*X[c_nx]+X[c_anx])
H = sparse.coo_matrix((data,(row,col)), shape=(numv, N))
return H,r
Hb,rb = _normal(c_a,c_b,c_v[:numv],c_n[:numv])
Hc,rc = _normal(c_a,c_c,c_v[numv:2*numv],c_n[numv:2*numv])
Hd,rd = _normal(c_a,c_d,c_v[2*numv:],c_n[2*numv:])
Hn,rn = con_unit(X,c_n,numv,N)
H = sparse.vstack((Hb, Hc, Hd, Hn))
r = np.r_[rb, rc, rd, rn]
return H,r
if kwargs.get('normal_bar'):
Nbar = kwargs.get('Nbar')
annnbar = None
if pitch<0:
c_n = Nbar-6*numv+np.arange(3*numv)-1 # unit n
c_nbar = Nbar-3*numv+np.arange(3*numv)-1 # n_bar
annnbar = [c_v,c_n,c_nbar,Nbar-1]
else:
c_n = Nbar-6*numv+np.arange(3*numv) # unit n
c_nbar = Nbar-3*numv+np.arange(3*numv) # n_bar
annnbar = [c_v,c_n,c_nbar]
Hn,rn = _con_normal(c_n)
H = sparse.vstack((H, Hn))
r = np.r_[r, rn]
return H*w,r*w,annnbar
if kwargs.get('snet_geodesic'):
Nbar = kwargs.get('Nbar')
Ns_bi = kwargs.get('Ns_bi')
if kwargs.get('normal_bar'):
"note: here has already include Hn,rn, below add twice"
c_n = Nbar-6*numv+np.arange(3*numv) # unit n
c_bi1 = Ns_bi-6*numv+np.arange(3*numv)
c_bi2 = c_bi1+3*numv
else:
c_n = Ns_bi-9*numv+np.arange(3*numv)
c_bi1 = c_n+3*numv
c_bi2 = c_bi1+3*numv
H1,r1 = con_cross_product2(X,c_v,c_cen1,c_cen3,c_bi1,N)
H2,r2 = con_cross_product2(X,c_v,c_cen2,c_cen4,c_bi2,N)
H3,r3 = con_dot(X,c_bi1,c_n,N)
H4,r4 = con_dot(X,c_bi2,c_n,N)
Hn,rn = _con_normal(c_n)
H = sparse.vstack((H,H1,H2,H3,H4,Hn))
r = np.r_[r,r1,r2,r3,r4,rn]
if kwargs.get('Snet_gi_t'): # no use now!!!
"""
quadface: diagonal crossing angle
X += [ld1,ld2, ud1,ud2, cos00] -- GI-net tangent
1. (v1-v3) = ld1*ud1, ud1**2=1
2. (v2-v4) = ld2*ud2, ud2**2=1
3. ud1*ud2 == cos00
4. (a^2-1)*A*B-(1+a^2)A+(1+a^2)B+1-a^2=0 (ti, gi-ti, given a)
A:=cos0; B:=cos00
"""
N10 = kwargs.get('N10')
Ns_n = kwargs.get('Ns_n')
Ns_git = kwargs.get('Ns_git')
V = mesh.V
        v1,v2,v3,v4 = mesh.rr_quadface.T # in order
if is_sub:
"normal from rr-vertex, tangent from inner-quadface"
inn,_ = mesh.get_rr_quadface_boundaryquad_index()
v1,v2,v3,v4 = v1[inn],v2[inn],v3[inn],v4[inn]
num = len(v1)
numl = Ns_git-8*num-1
numud = Ns_git-6*num-1
numn = Ns_n - 3*V
arr = np.arange(num)
c_ld1 = numl+arr
c_ld2 = numl+num+arr
"HERE VERTEX FROM UNIT-NORMAL"
#c_alln = numn + np.arange(3*V)
#X[c_alln] = -mesh.vertex_normals().flatten('F')
subn = mesh.rr_star_corner[0]
c_subn = columnnew(subn,numn,V)
Hn,rn = _con_normal(c_subn)
expn = np.setdiff1d(np.arange(V), subn)
c_expn = columnnew(expn,numn,V)
X[c_expn] = -mesh.vertex_normals()[expn].flatten('F')
c_v1 = numn+np.r_[v1,V+v1,2*V+v1] # [x,y,z]
c_v2 = numn+np.r_[v2,V+v2,2*V+v2] # [x,y,z]
c_v3 = numn+np.r_[v3,V+v3,2*V+v3] # [x,y,z]
c_v4 = numn+np.r_[v4,V+v4,2*V+v4] # [x,y,z]
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
He1,re1 = con_edge(X,c_v1,c_v3,c_ld1,c_ud1,num,N)
He2,re2 = con_edge(X,c_v2,c_v4,c_ld2,c_ud2,num,N)
Hu1,ru1 = con_unit(X,c_ud1,num,N)
Hu2,ru2 = con_unit(X,c_ud2,num,N)
Ha,ra = con_constangle2(X,c_ud1,c_ud2,Ns_git-1,num,N)
if True:
"1 eq.: (a^2-1)*A*B-(1+a^2)A+(1+a^2)B+1-a^2=0"
a = assign_crpc_ratio
c_cos0,c_cos00 = N10-1, Ns_git-1
col = np.array([c_cos0,c_cos00],dtype=int)
row = np.zeros(2)
d1,d2 = (a**2-1)*X[c_cos00]-a**2-1, (a**2-1)*X[c_cos0]+a**2+1
data = np.array([d1,d2])
rpc = np.array([(a**2-1)*X[c_cos0]*X[c_cos00]+a**2-1])
Hpc = sparse.coo_matrix((data,(row,col)), shape=(1, N))
H = sparse.vstack((H,Hn,He1,He2,Hu1,Hu2,Ha,Hpc))
r = np.r_[r,rn,re1,re2,ru1,ru2,ra,rpc]
# if assign:
# "maybe not useful"
# H0,r0 = con_constl(np.array([Ns_git-1],dtype=int),cos00,1,N)
# H = sparse.vstack((H, H0))
# r = np.r_[r,r0]
#print('n:', np.sum(np.square((Hn*X)-rn)))
# print('e1:', np.sum(np.square((He1*X)-re1)))
# print('u1:', np.sum(np.square((Hu1*X)-ru1)))
# print('a:', np.sum(np.square((Ha*X)-ra)))
# print('pc:', np.sum(np.square((Hpc*X)-rpc)))
#print('all:', np.sum(np.square((H*X)-r)))
if kwargs.get('CRPC'):
"""
quadface: diagonal crossing angle
        no additional variables; relates e1,e2 for a given ratio a
a family of constraints:
(1+a) e1*e2 + a-1=0 <==> e1*e2 = (1-a) / (1+a) === cos0
"""
num = mesh.num_quadface
numud = N10-6*num-1
arr = np.arange(num)
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
col = np.r_[c_ud1,c_ud2]
row = np.tile(arr,6)
data = np.r_[X[c_ud2],X[c_ud1]]
rr = np.einsum('ij,ij->i',X[c_ud1].reshape(-1,3, order='F'),X[c_ud2].reshape(-1,3, order='F'))
a = assign_crpc_ratio
rr += np.ones(num)*(1-a)/(1+a)
Hr = sparse.coo_matrix((data,(row,col)), shape=(num, N))
H = sparse.vstack((H,Hr))
r = np.r_[r,rr]
#self.add_iterative_constraint(H * w, r * w, 'Snet_diagnet')
return H*w,r*w
#--------------------------------------------------------------------------
# G-net:
#--------------------------------------------------------------------------
def con_1geodesic(polyline_direction=False,**kwargs):
""" still depends on the angle condition at vertex-star
default direction: e1*e2-e3*e4=0;
"""
w = kwargs.get('Geodesic')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
num = mesh.num_regular
arr = np.arange(num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
if polyline_direction:
H,r = con_equal_opposite_angle(X,c_ue2,c_ue3,c_ue4,c_ue1,num,N)
else:
H,r = con_equal_opposite_angle(X,c_ue1,c_ue2,c_ue3,c_ue4,num,N)
return H*w,r*w
def _con_gnet(X,w,c_ue1,c_ue2,c_ue3,c_ue4,N):
num = int(len(c_ue1)/3)
H1,r1 = con_equal_opposite_angle(X,c_ue1,c_ue2,c_ue3,c_ue4,num,N)
H2,r2 = con_equal_opposite_angle(X,c_ue2,c_ue3,c_ue4,c_ue1,num,N)
H, r = sparse.vstack((H1, H2)), np.r_[r1,r2]
return H*w, r*w
def con_gnet(rregular=False,checker_weight=1,id_checker=None,**kwargs):
"""
based on con_unit_edge(diag=False)
e1*e2-e3*e4=0; e2*e3-e1*e4=0
"""
w = kwargs.get('Gnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
if rregular:
"function same as below:con_gnet_diagnet"
num=len(mesh.ind_rr_star_v4f4)
else:
num = mesh.num_regular
arr = np.arange(num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
if rregular and checker_weight<1:
"at red-rr-vs, smaller weight"
wr = checker_weight
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
Hb,rb = _con_gnet(X,w,c_ue1[ib],c_ue2[ib],c_ue3[ib],c_ue4[ib],N)
Hr,rr = _con_gnet(X,wr,c_ue1[ir],c_ue2[ir],c_ue3[ir],c_ue4[ir],N)
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _con_gnet(X,w,c_ue1,c_ue2,c_ue3,c_ue4,N)
return H,r
def con_gnet_diagnet(checker_weight=1,id_checker=None,**kwargs):
"""
based on con_unit_edge(diag=True)
e1*e2-e3*e4=0; e2*e3-e1*e4=0
"""
w = kwargs.get('Gnet_diagnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
num = len(mesh.ind_rr_star_v4f4)
arr = np.arange(num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
if checker_weight<1:
"at red-rr-vs, smaller weight"
wr = checker_weight
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
Hb,rb = _con_gnet(X,w,c_ue1[ib],c_ue2[ib],c_ue3[ib],c_ue4[ib],N)
Hr,rr = _con_gnet(X,wr,c_ue1[ir],c_ue2[ir],c_ue3[ir],c_ue4[ir],N)
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _con_gnet(X,w,c_ue1,c_ue2,c_ue3,c_ue4,N)
return H,r
def con_dog(rregular=False,**kwargs):
"""
based on con_unit_edge() & con_gnet()
e1*e2-e2*e3=0
"""
w = kwargs.get('DOG')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
if rregular:
num=len(mesh.ind_rr_star_v4f4)
else:
num = mesh.num_regular
arr = np.arange(num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
#c_ue4 = columnnew(arr,N5-3*num,num)
H,r = con_equal_opposite_angle(X,c_ue1,c_ue2,c_ue2,c_ue3,num,N)
return H*w,r*w
def con_gonet(rregular=False,is_direction24=False,**kwargs):
""" GEODESIC PARALLEL COORDINATES
based on con_unit_edge() & con_1geodesic
orthogonal: (e1-e3)*(e2-e4) = 0
if direction:
geodesic: e1*e2-e1*e4=0; e2*e3-e3*e4=0;
else:
geodesic: e1*e2-e2*e3=0; e3*e4-e4*e1=0;
"""
w = kwargs.get('GOnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
if rregular:
num=len(mesh.ind_rr_star_v4f4)
else:
num = mesh.num_regular
arr = np.arange(num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
if is_direction24:
H1,r1 = con_equal_opposite_angle(X,c_ue1,c_ue2,c_ue2,c_ue3,num,N)
H2,r2 = con_equal_opposite_angle(X,c_ue3,c_ue4,c_ue4,c_ue1,num,N)
else:
H1,r1 = con_equal_opposite_angle(X,c_ue1,c_ue2,c_ue1,c_ue4,num,N)
H2,r2 = con_equal_opposite_angle(X,c_ue2,c_ue3,c_ue3,c_ue4,num,N)
row = np.tile(arr,12)
col = np.r_[c_ue1,c_ue2,c_ue3,c_ue4]
data = np.r_[X[c_ue2]-X[c_ue4],X[c_ue1]-X[c_ue3],X[c_ue4]-X[c_ue2],X[c_ue3]-X[c_ue1]]
H3 = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r3 = np.einsum('ij,ij->i',(X[c_ue1]-X[c_ue3]).reshape(-1,3, order='F'),(X[c_ue2]-X[c_ue4]).reshape(-1,3, order='F'))
H = sparse.vstack((H1, H2, H3))
r = np.r_[r1, r2, r3]
#print('err:gonet:',np.sum(np.square(H*X-r)))
return H*w,r*w
def con_Voss(**kwargs):
"conjugate geodesic net: planar quads with equal opposite angles"
H1,r1 = con_normal_constraints(**kwargs)
H2,r2 = con_planarity_constraints(**kwargs)
H3,r3 = con_gnet(**kwargs)
H = sparse.vstack((H1,H2,H3))
r = np.r_[r1,r2,r3]
return H,r
#--------------------------------------------------------------------------
# DGPC:
#--------------------------------------------------------------------------
def con_dgpc(rregular=False,polyline_direction=False,**kwargs):
"""main difference here is using patch_matrix to represent all vertices
based on con_unit_edge() & con_gonet
equal parallel_circle_direction edges
each row: (vi-vj)^2 - lij^2 = 0
"""
w = kwargs.get('DGPC')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Ndgpc = kwargs.get('Ndgpc')
rm = mesh.patch_matrix
if polyline_direction:
rm = rm.T
nrow,ncol = rm.shape
vi,vj = rm[:,:-1].flatten(), rm[:,1:].flatten()
c_vi = columnnew(vi ,0,mesh.V)
c_vj = columnnew(vj ,0,mesh.V)
c_l = (Ndgpc-nrow+np.arange(nrow)).repeat(ncol-1)
H,r = con_diagonal(X,c_vi,c_vj,c_l,nrow*(ncol-1),N)
return H*w,r*w
#--------------------------------------------------------------------------
# AAG / GGA-net:
#--------------------------------------------------------------------------
def _con_agnet_liouville(c_geo,num,angle=90,is_angle=False,**kwargs):
""" X +=[ll1,ll2,ll3,ll4,u1,u2; lu1,tu1]
+=[lu2,tu2; lla,llc,g1, lg1,tg1, c]
orthgonal A-net & constant angle with diagonal crv.
2asymptotics: v1 -- v -- v3 & v2 -- v -- v4
orthogonal: u1 = l1**2*(V3-V0) - l3**2*(V1-V0)
u2 = l2**2*(V4-V0) - l4**2*(V2-V0)
u1 * v1 = 0 (no need to be unit)
1geodesic: a --- v --- c
g1 = la**2*(Vc-V0) - lc**2*(Va-V0)
const.angle: u1 = tu1 * lu1
g1 = tg1 * lg1
tu1 * tg1 = const.
"""
X = kwargs.get('X')
N = kwargs.get('N')
Noscut = kwargs.get('Noscut')
c_v,c_v1,c_v3,c_lu1,c_tu1,c_lla,c_llc,c_lg,c_g1,c_lg1,c_tg1,c_c = c_geo
H2,r2 = con_osculating_tangent(X,c_v,c_v1,c_v3,c_lla,c_llc,c_lg,c_g1,num,N)
"Unit 1asym tangent vector u1 = tu1 * lu1 :"
c_u1 = Noscut-10*num+4*num+np.arange(3*num)
H3,r3 = con_unit_vector(X,c_u1,c_tu1,c_lu1,num,N)
"Unit 1geo tangent vector g1 = tg1 * lg1 :"
H4,r4 = con_unit_vector(X,c_g1,c_tg1,c_lg1,num,N)
"Constant angle with 1geo and 1asym crv.: "
H5,r5 = con_constangle2(X,c_tu1,c_tg1,c_c,num,N)
H = sparse.vstack((H2,H3,H4,H5))
r = np.r_[r2,r3,r4,r5]
if is_angle:
cos0 = np.cos(angle/180.0*np.pi)
H0,r0 = con_constl(np.array([c_c],dtype=int),cos0,1,N)
Ha,ra = con_constangle(X,c_tu1,c_tg1,cos0,num,N)
H = sparse.vstack((H, H0, Ha))
r = np.r_[r,r0,ra]
return H,r
def _con_agnet_planar_geodesic(ver_poly_strip,strong=False,**kwargs):
""" X +=[ni]
along each i-th geodesic: ni * (vij-vik) = 0; k=j+1,j=0,...
refer: self.get_poly_strip_normal()
if strong:
ni * anet_n = 0
"""
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Ndgeo = kwargs.get('Ndgeo')
Ndgeopc = kwargs.get('Ndgeopc')
iall,iind = ver_poly_strip
num = len(iall)
arr = Ndgeopc-3*num+np.arange(3*num)
c_nx,c_ny,c_nz = arr[:num],arr[num:2*num],arr[2*num:3*num]
col=row=data=r = np.array([])
k,i = 0,0
for iv in iall:
va,vb = iv[:-1],iv[1:]
m = len(va)
c_a = columnnew(va,0,mesh.V)
c_b = columnnew(vb,0,mesh.V)
c_ni = np.r_[np.tile(c_nx[i],m),np.tile(c_ny[i],m),np.tile(c_nz[i],m)]
coli = np.r_[c_a,c_b,c_ni]
rowi = np.tile(np.arange(m),9) + k
datai = np.r_[X[c_ni],-X[c_ni],X[c_a]-X[c_b]]
ri = np.einsum('ij,ij->i',X[c_ni].reshape(-1,3,order='F'),(X[c_a]-X[c_b]).reshape(-1,3,order='F'))
col = np.r_[col,coli]
row = np.r_[row,rowi]
data = np.r_[data,datai]
r = np.r_[r,ri]
k += m
i += 1
H = sparse.coo_matrix((data,(row,col)), shape=(k, N))
H1,r1 = con_unit(X,arr,num,N)
H = sparse.vstack((H,H1))
r = np.r_[r,r1]
if strong:
"planar_geodesic = PC crv. if strong: normal_planar=PQ"
num = len(mesh.ind_rr_star_v4f4)
move = Ndgeo-6*num
col=row=data= np.array([])
k,i = 0,0
for iv in iind:
iv=np.array(iv)
m = len(iv)
c_an = move+np.r_[iv,iv+num,iv+2*num]
c_ni = np.r_[np.tile(c_nx[i],m),np.tile(c_ny[i],m),np.tile(c_nz[i],m)]
coli = np.r_[c_an,c_ni]
rowi = np.tile(np.arange(m),6) + k
datai = np.r_[X[c_ni],X[c_an]]
ri = np.einsum('ij,ij->i',X[c_ni].reshape(-1,3,order='F'),X[c_an].reshape(-1,3,order='F'))
col = np.r_[col,coli]
row = np.r_[row,rowi]
data = np.r_[data,datai]
r = np.r_[r,ri]
k += m
i += 1
H0 = sparse.coo_matrix((data,(row,col)), shape=(k, N))
H = sparse.vstack((H,H0))
return H,r
def con_anet_geodesic(ver_poly_strip,another_poly_direction=False,
checker_weight=1,id_checker=None,
**kwargs):
"""Anet(Gnet) with diagonal geodesic/asymptotic project:
d 4 c
1 v 3
a 2 b
if AAG:
control net (v,1,2,3,4) is Anet, (a-v-c or b-v-d) is geodesic
elif GAA:
diagonal net (v,a,b,c,d) is Anet, (1-v-3 or 2-v-4) is geodesic
elif GGA:
control net (v,1,2,3,4) is Gnet, (a-v-c or b-v-d) is asymptotic
elif AGG:
diagonal net (v,a,b,c,d) is Gnet, (1-v-3 or 2-v-4) is asymptotic
if AAG/GAA:
X += [ni]+[Ni];
ni: vertex-normal from Anet;
Ni: osculating normal of geodesic
<==> from Anet/Anet_diagnet: ni*(vi-v)=0,(i=1,2,3,4), ni^2=1;
*geodesic: Ni=(Vc-V) x (Va-V); ni * Ni = 0
elif GGA/AGG:
X += [Ni,No1,No2];
Ni: vertex-normal of Gnet;
        No1,No2: two osculating normals of G-net
<==> Way1 (guess has problem):
Gnet/Gnet_diagnet: li*ei=vi-v,ei^2=1 (i=1,2,3,4);
bisecting: ni*(e1-e3)= ni*(e2-e4)=0; ni^2=1;
asymptotic: ni*(va-v)=ni*(vc-v)=0
*Way2 (using this):
*Ni^2=1; Ni*No1,No2,va-v,vc-v=0;
*No1=(vc-v) x (va-v), No2=(vd-v) x (vb-v)
elif AAGG/GGAA:
X += [ni] + [No1,No2];
ni: vertex-normal from Anet;
        No1,No2: two osculating normals of G-net
<==> *No1=(vc-v) x (va-v), No2=(vd-v) x (vb-v); ni*No1,No2=0
# from constraints_net import con_unit_edge, con_anet,con_anet_diagnet,
# con_gnet,con_gnet_diagnet
checker constraints:
blue for hard constraint;
red for soft..with lower checker_weight
id_checker=[iblue,ired]; len(id_checker)==len(ind_rr_star_v4f4)
rr_star[ind_rr_star_v4f4][id_checker[0]] =\in= vblue
rr_star[ind_rr_star_v4f4][id_checker[1]] =\in= vred
"""
w_aag = kwargs.get('AAGnet')
w_gaa = kwargs.get('GAAnet')
w_gga = kwargs.get('GGAnet')
w_agg = kwargs.get('AGGnet')
w_aagg = kwargs.get('AAGGnet')
w_ggaa = kwargs.get('GGAAnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nanet = kwargs.get('Nanet') # for AAG, AAGG
Ndgeo = kwargs.get('Ndgeo')
num=len(mesh.ind_rr_star_v4f4)
arr3 = np.arange(3*num)
if id_checker is not None:
"if checker_weight<1, checker_weight for red-vertex; w1,..,w6 for blue"
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
v,va,vb,vc,vd = mesh.rr_star_corner# in diagonal direction
c_v = columnnew(v,0,mesh.V)
c_1 = columnnew(v1,0,mesh.V)
c_2 = columnnew(v2,0,mesh.V)
c_3 = columnnew(v3,0,mesh.V)
c_4 = columnnew(v4,0,mesh.V)
c_a = columnnew(va,0,mesh.V)
c_b = columnnew(vb,0,mesh.V)
c_c = columnnew(vc,0,mesh.V)
c_d = columnnew(vd,0,mesh.V)
c_n = Nanet-3*num+np.arange(3*num) # for AAG, AAGG
def _1geo(X,w,c_v,c_a,c_c,c_an,c_on):
"on = (Vc-V) x (Va-V); an * on = 0"
H1,r1 = con_cross_product2(X,c_v,c_c,c_a,c_on,N)
H2,r2 = con_dot(X,c_an,c_on,N)
H = sparse.vstack((H1,H2))
r = np.r_[r1,r2]
return H*w, r*w
def _1asym(X,w,c_v,c_a,c_c,c_n):
"*asymptotic: ni*(va-v)=ni*(vc-v)=0"
num = int(len(c_v)/3)
H1,r1 = con_planarity(X,c_v,c_a,c_n,num,N)
H2,r2 = con_planarity(X,c_v,c_c,c_n,num,N)
Hu,ru = con_unit(X,c_n,num,N)
H = sparse.vstack((H1,H2,Hu))
r = np.r_[r1,r2,ru]
return H*w, r*w
def _gga(X,w,c_v,c_g1,c_g2,c_g3,c_g4,c_l,c_r,c_n,c_on1,c_on2):
Ha,ra = _1asym(X,w,c_v,c_l,c_r,c_n)
Ho1,ro1 = _1geo(X,w,c_v,c_g1,c_g3,c_n,c_on1)
Ho2,ro2 = _1geo(X,w,c_v,c_g2,c_g4,c_n,c_on2)
H = sparse.vstack((Ha,Ho1,Ho2))
r = np.r_[ra,ro1,ro2]
return H,r
def _aagg(X,w,c_v,c_g1,c_g2,c_g3,c_g4,c_a1,c_a2,c_a3,c_a4,c_n,c_on1,c_on2):
Ha1,ra1 = _1asym(X,w,c_v,c_a1,c_a3,c_n)
Ha2,ra2 = _1asym(X,w,c_v,c_a2,c_a4,c_n)
Ho1,ro1 = _1geo(X,w,c_v,c_g1,c_g3,c_n,c_on1)
Ho2,ro2 = _1geo(X,w,c_v,c_g2,c_g4,c_n,c_on2)
H = sparse.vstack((Ha1,Ha2,Ho1,Ho2))
r = np.r_[ra1,ra2,ro1,ro2]
return H,r
wr = checker_weight
if w_aag or w_gaa:
"""X += [ni]+[Ni];
ni: vertex-normal from Anet;
Ni: osculating normal of geodesic
<==> from Anet/Anet_diagnet: ni*(vi-v)=0,(i=1,2,3,4), ni^2=1;
*geodesic: Ni=(Vc-V) x (Va-V); ni * Ni = 0
"""
wag = max(w_aag,w_gaa)
c_on = Ndgeo-3*num + arr3
if w_aag:
if another_poly_direction:
c_l,c_r = c_b, c_d
else:
c_l,c_r = c_a, c_c
elif w_gaa:
if another_poly_direction:
c_l,c_r = c_2, c_4
else:
c_l,c_r = c_1, c_3
if checker_weight<1:
"at red-rr-vs, smaller weight"
Hb,rb = _1geo(X,wag,c_v[ib],c_l[ib],c_r[ib],c_n[ib],c_on[ib])
Hr,rr = _1geo(X,wr,c_v[ir],c_l[ir],c_r[ir],c_n[ir],c_on[ir])
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _1geo(X,wag,c_v,c_l,c_r,c_n,c_on)
elif w_gga or w_agg:
"""X += [Ni,No1,No2];
Ni: vertex-normal of Anet;
        No1,No2: two osculating normals of G-net
<==>Ni^2=1; Ni*No1,No2,va-v,vc-v=0;
No1=(vc-v) x (va-v), No2=(vd-v) x (vb-v)
"""
wag = max(w_gga,w_agg)
c_n = Ndgeo-9*num + arr3
c_on1,c_on2 = c_n + 3*num,c_n + 6*num
if w_gga:
c_g1,c_g2,c_g3,c_g4 = c_1,c_2,c_3,c_4
if another_poly_direction:
c_l,c_r = c_b, c_d
else:
c_l,c_r = c_a, c_c
elif w_agg:
c_g1,c_g2,c_g3,c_g4 = c_a,c_b,c_c,c_d
if another_poly_direction:
c_l,c_r = c_2, c_4
else:
c_l,c_r = c_1, c_3
if checker_weight<1:
"at red-rr-vs, smaller weight"
Hb,rb = _gga(X,wag,c_v[ib],c_g1[ib],c_g2[ib],c_g3[ib],c_g4[ib],c_l[ib],c_r[ib],c_n[ib],c_on1[ib],c_on2[ib])
Hr,rr = _gga(X,wr,c_v[ir],c_g1[ir],c_g2[ir],c_g3[ir],c_g4[ir],c_l[ir],c_r[ir],c_n[ir],c_on1[ir],c_on2[ir])
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _gga(X,wag,c_v,c_g1,c_g2,c_g3,c_g4,c_l,c_r,c_n,c_on1,c_on2)
elif w_aagg or w_ggaa:
""" X += [ni] + [No1,No2];
ni: vertex-normal from Anet;
        No1,No2: two osculating normals of G-net
<==> *No1=(vc-v) x (va-v), No2=(vd-v) x (vb-v); ni*No1,No2=0
"""
wag = max(w_aagg,w_ggaa)
c_on1 = Ndgeo-6*num + arr3
c_on2 = c_on1 + 3*num
if w_aagg:
c_g1,c_g2,c_g3,c_g4 = c_a,c_b,c_c,c_d ##different from above
c_a1,c_a2,c_a3,c_a4 = c_1,c_2,c_3,c_4
elif w_ggaa:
c_g1,c_g2,c_g3,c_g4 = c_1,c_2,c_3,c_4 ##different from above
c_a1,c_a2,c_a3,c_a4 = c_a,c_b,c_c,c_d
if checker_weight<1:
"at red-rr-vs, smaller weight"
Hb,rb = _aagg(X,wag,c_v[ib],c_g1[ib],c_g2[ib],c_g3[ib],c_g4[ib],
c_a1[ib],c_a2[ib],c_a3[ib],c_a4[ib],
c_n[ib],c_on1[ib],c_on2[ib])
Hr,rr = _aagg(X,wr,c_v[ir],c_g1[ir],c_g2[ir],c_g3[ir],c_g4[ir],
c_a1[ir],c_a2[ir],c_a3[ir],c_a4[ir],
c_n[ir],c_on1[ir],c_on2[ir])
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _aagg(X,wag,c_v,c_g1,c_g2,c_g3,c_g4,c_a1,c_a2,c_a3,c_a4,
c_n,c_on1,c_on2)
w5 = kwargs.get('agnet_liouville') # no need now.
w6 = kwargs.get('planar_geodesic') # no need now.
Ndgeoliou = kwargs.get('Ndgeoliou')
if w5: # no need now.
"X +=[lu1,tu1; lla,llc,g1, lg1,tg1]"
arr = np.arange(num)
n = Ndgeoliou - 13*num -1
c_lu1 = n+arr
c_tu1 = n+num+arr3
c_lla = n+4*num+arr
c_llc = c_lla+num
c_g1 = n+6*num+arr3
c_lg1 = n+9*num+arr
c_tg1 = n+10*num+arr3
c_const = Ndgeoliou - 1
#c_geo = [c_lu1,c_tu1,c_lla,c_llc,c_g1,c_lg1,c_tg1,c_const]
if w_gaa:
if another_poly_direction:
c_geo = [c_v,c_2,c_4,c_lu1,c_tu1,c_lla,c_llc,c_g1,c_lg1,c_tg1,c_const]
else:
c_geo = [c_v,c_1,c_3,c_lu1,c_tu1,c_lla,c_llc,c_g1,c_lg1,c_tg1,c_const]
elif w_aag:
if another_poly_direction:
c_geo = [c_v,c_b,c_d,c_lu1,c_tu1,c_lla,c_llc,c_g1,c_lg1,c_tg1,c_const]
else:
c_geo = [c_v,c_a,c_c,c_lu1,c_tu1,c_lla,c_llc,c_g1,c_lg1,c_tg1,c_const]
H0,r0 = _con_agnet_liouville(c_geo,num,**kwargs)
H = sparse.vstack((H,H0))
r = np.r_[r,r0]
if w6: # no need now.
H0,r0 = _con_agnet_planar_geodesic(ver_poly_strip,**kwargs)
H = sparse.vstack((H,H0))
r = np.r_[r,r0]
return H,r
def con_AGnet(is_ag_or_ga=True,is_ortho=False,
is_const_r=False,is_unique_r=False,**kwargs):
""" based on pre-defined osculating_tangent: con_osculating_tangents()
X +=[ll1,ll2,ll3,ll4,lt1,lt2,t1,t2]
v1-v-v3:
lt*t = l1**2*(V3-V0) - l3**2*(V1-V0)
t^2=1
<===>
ll1 (= l1**2) = (V1-V0)^2
ll3 (= l3**2) = (V3-V0)^2
ll1 * (v3-v0) - ll3 * (v1-v0) - t*lt = 0
t^2=1
asymptotic v1-v-v3; geodesic v2-v-v4;
X += [surfN; ogN]
unit surfN // principalnormal of geodesic _|_ edges of asymptotic
constraints:
1. surfN^2=1
2. surfN * t2 = 0;
3. surfN * (v1-v) = 0
4. surfN * (v3-v) = 0
5. ogN^2=1
6. ogN * (v2-v) = 0
7. ogN * (v4-v) = 0
8. ogN * surfN = 0
if ortho.
t1 * t2 = 0
if const.r.
X+=[Ri],each geodesic assigned Ri=ri^2, or 1 whole R=const.r^2
(v1-v)^2 = 4*[(v1-v)*surfN/|v1-v|]^2 *r^2
<==> ll1 = 4*[]^2 * r^2
<==> ll1^2 = 4* C *R, C:= [(v1-v)*surfN]^2
"""
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Noscut = kwargs.get('Noscut')
Nag = kwargs.get('Nag')
wag = kwargs.get('AGnet')
#igeo = mesh.igeopoly#TODO
num=len(mesh.ind_rr_star_v4f4)
arr,arr3 = np.arange(num),np.arange(3*num)
if is_const_r or is_unique_r:
if is_const_r:
pass
# k = len(igeo)
# c_ri = Nag-k+np.arange(k)
# c_srfN = Nag-6*num+arr3-k
# c_ogN = Nag-4*num+arr3-k
elif is_unique_r:
c_r = Nag-1
c_srfN = Nag-6*num+arr3-1
c_ogN = Nag-4*num+arr3-1
else:
c_srfN = Nag-6*num+arr3
c_ogN = Nag-3*num+arr3
n = Noscut - 12*num
c_ll1,c_ll2 = n+arr,n+arr+num
c_t1,c_t2 = n+6*num+arr3, n+9*num+arr3
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
c_v = columnnew(v,0,mesh.V)
c_1 = columnnew(v1,0,mesh.V)
c_2 = columnnew(v2,0,mesh.V)
c_3 = columnnew(v3,0,mesh.V)
c_4 = columnnew(v4,0,mesh.V)
if is_ag_or_ga:
"asy(1-v-3), geo(2-v-4)"
c1,c2,c3,c4 = c_1,c_2,c_3,c_4
else:
"asy(2-v-4), geo(1-v-3)"
c1,c2,c3,c4 = c_2,c_1,c_4,c_3
c_ll1,c_ll2 = c_ll2,c_ll1
c_t1,c_t2 = c_t2,c_t1
def _AG():
"surfN^2=1"
H1,r1 = con_unit(X,c_srfN,num,N)
"surfN * t2 = 0;"
H2,r2 = con_dot(X,c_t2,c_srfN,N)
"surfN*(v1-v)=0; surfN*(v3-v)=0;"
H3,r3 = con_planarity(X,c_v,c1,c_srfN,num,N)
H4,r4 = con_planarity(X,c_v,c3,c_srfN,num,N)
"ogN^2=1; ogN*(v2-v)=0; ogN*(v4-v)=0"
H5,r5 = con_unit(X,c_ogN,num,N)
H6,r6 = con_planarity(X,c_v,c2,c_ogN,num,N)
H7,r7 = con_planarity(X,c_v,c4,c_ogN,num,N)
"ogN * surfN = 0"
H8,r8 = con_dot(X,c_srfN,c_ogN,N)
H = sparse.vstack((H1,H2,H3,H4,H5,H6,H7,H8))
r = np.r_[r1,r2,r3,r4,r5,r6,r7,r8]
#print('err:1:',np.sum(np.square(H1*X-r1)))
#print('err:2:',np.sum(np.square(H2*X-r2)))
print('err:3:',np.sum(np.square(H3*X-r3)))
print('err:4:',np.sum(np.square(H4*X-r4)))
# print('err:5:',np.sum(np.square(H5*X-r5)))
# print('err:6:',np.sum(np.square(H6*X-r6)))
# print('err:7:',np.sum(np.square(H7*X-r7)))
# print('err:8:',np.sum(np.square(H8*X-r8)))
return H,r
H,r = _AG()
if is_ortho:
"t1 * t2 = 0"
Ho,ro = con_dot(X,c_t1,c_t2,N)
H = sparse.vstack((H,Ho))
r = np.r_[r,ro]
#print('err:o:',np.sum(np.square(Ho*X-ro)))
if is_const_r or is_unique_r:
if is_const_r:
"num_m is the num of geodesic"
pass
elif is_unique_r:
"ll1^2 = 4* C *R, C:= [(v1-v)*surfN]^2"
VV1 = (X[c1]-X[c_v]).reshape(-1,3,order='F')
srfN = X[c_srfN].reshape(-1,3,order='F')
C = np.einsum('ij,ij->i',VV1,srfN)**2
col = np.r_[c_ll1,np.ones(num,dtype=int)*c_r]
row = np.tile(arr,2)
data = np.r_[2*X[c_ll1],-4*C]
rr = X[c_ll1]**2
Hr = sparse.coo_matrix((data,(row,col)), shape=(num, N))
print('err:r:',np.sum(np.square(Hr*X-rr)))
H = sparse.vstack((H,Hr))
r = np.r_[r,rr]
return H*wag,r*wag
def con_singular_Anet_diag_geodesic(singular_polylist,ind_anet,**kwargs):
"for singular A-net, 1family diagonals are geodesic"
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Ndgeo = kwargs.get('Ndgeo')
Nanet = kwargs.get('Nanet')
vl,vc,vr = singular_polylist
c_l = columnnew(vl,0,mesh.V)
c_v = columnnew(vc,0,mesh.V)
c_r = columnnew(vr,0,mesh.V)
num = len(vc)
arr3 = np.arange(3*num)
c_on = Ndgeo-3*num + arr3
num_anet=len(mesh.ind_rr_star_v4f4)
#c_anet_n = columnnew(ind_anet,Nanet-3*num_anet,num_anet)
c_anet_n = np.r_[ind_anet,ind_anet+num_anet,ind_anet+2*num_anet]+Nanet-3*num_anet
#print(len(c_on),len(c_anet_n),num,num_anet)
def _1geo(c_v,c_a,c_c,c_an,c_on):
"based on control-net==A-net"
"an:Anet-normal; on:Osculating-normal"
"on=(Vc-V)x(Va-V)"
H1,r1 = con_cross_product2(X,c_v,c_a,c_c,c_on,N)
"an*on=0"
H2,r2 = con_dot(X,c_an,c_on,N)
H = sparse.vstack((H1,H2))
r = np.r_[r1,r2]
return H,r
H,r = _1geo(c_v,c_l,c_r,c_anet_n,c_on)
return H,r
def con_diag_1_asymptotic_or_geodesic(singular_polylist=None,
ind_rrv=None,
another_poly_direction=False,
is_asym_or_geod = True,
**kwargs):
""" normal at vs from control-mesh two polylines tangents: t1 x t2 // N
default direction: va-v-vc;
else: vb-v-vd
common:
<==> (v3-v1) x (v4-v2) = un * l; un^2=1
if asymptotic:
X += [v4N] ##len=[3]
<==> uN * (va-v) = uN * (vc-v) == 0
elif geodesic:
X += [v4N,la,lc,ea,ec] ##len=[3+1+1+3+3]
<==> uN x (ea+ec) == 0;
(va-v) = la * ea; (vc-v) = lc * ec; ea^2=1; ec^2=1;
"""
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
num = len(mesh.ind_rr_star_v4f4)
arr,arr3 = np.arange(num), np.arange(3*num)
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
c_v = columnnew(v,0,mesh.V)
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
if singular_polylist is not None:
vl,vc,vr = singular_polylist
c_v0 = columnnew(vc,0,mesh.V)
c_vl = columnnew(vl,0,mesh.V)
c_vr = columnnew(vr,0,mesh.V)
ind = ind_rrv
ind3 = columnnew(ind,0,num)
if is_asym_or_geod:
"uN * (va-v) = uN * (vc-v) == 0"
w = kwargs.get('diag_1_asymptotic')
Ncd = kwargs.get('Ncd')-4*num
c_l,c_n = Ncd+arr, Ncd+num+arr3
H1,r1 = con_planarity(X,c_vl,c_v0,c_n[ind3],len(ind),N)#change
H2,r2 = con_planarity(X,c_vr,c_v0,c_n[ind3],len(ind),N)#change
H = sparse.vstack((H1,H2))
r = np.r_[r1,r2]
else:
"uN x (ea+ec) == 0;(va-v) = la * ea; (vc-v) = lc * ec; ea^2=1; ec^2=1;"
w = kwargs.get('diag_1_geodesic')
num_ind = len(ind)#new
        arr_ind,arr3_ind = np.arange(num_ind), np.arange(3*num_ind)
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
import gpu_pwr
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
with open('job_queue_50.json', 'r') as fp: #TODO
queue = json.load(fp)
queue_dict = {}
arrival_time = 0
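# job arrivals follow a Poisson process: each inter-arrival gap below is drawn with a 30s mean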
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
# per-job bookkeeping dicts, keyed by the job id as a string
def per_job(default_factory):
    return {str(item): default_factory() for item in queue}
queue_delay = per_job(int)       # seconds each job waited in the queue
job_start = {} #{'49': time1, '15': time2...}
JCT = per_job(int)               # job completion times
completion = per_job(int)
overhead = per_job(int)          # every job starts with 0s overhead time
ovhd_start = per_job(int)
b_start = per_job(int)
c_start = per_job(int)
d_start = per_job(int)
ovhd_a = per_job(list)           # {1: [10, 12, ...], 2: [xx]}
ovhd_b = per_job(list)
ovhd_c = per_job(list)
ovhd_d = per_job(list)
ovhd_total = per_job(list)
k80_1st = per_job(list)
v100_1st = per_job(list)
num_mig = per_job(int)           # number of migrations per job
queue_start = per_job(int)
queue_time = per_job(int)
V100_epoch_time = per_job(int)
K80_epoch_time = per_job(int)
K80_start_time = per_job(int)
V100_start_time = per_job(int)
promote_start_time = per_job(int)
demote_list = []
K80_time = per_job(int)
V100_time = per_job(int)
gpu_usage_time = [] # global traces, not per-job
gpu_usage = []
gpu_usage_completion = []
speedup_dict = per_job(int)
predict_dict = per_job(int)
birthplace = per_job(lambda: 'none')
index = 0
all_jobs_started = False
K80_cap = 8 #TODO
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(K80_cap):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(V100_cap):
V100_job[str(i)] = 'idle'
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = ['c2178']#, 'c2181']
V100_node = ['d1022']#, 'd1012']
host_node = 'c0158'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
run_log = open('run.log','w')
def K80_LUT(gpu):
quotient = int(gpu) // 8
remainder = int(gpu) % 8
real_node = K80_node[quotient]
real_gpu = str(remainder)
return real_node, real_gpu
def V100_LUT(gpu):
quotient = int(gpu) // 4
remainder = int(gpu) % 4
real_node = V100_node[quotient]
real_gpu = str(remainder)
return real_node, real_gpu
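# The LUTs map a flat gpu index onto a (node, local-gpu) pair: e.g. K80_LUT('3')
# gives ('c2178', '3') with the single K80 node above, and would give ('c2181', '1')
# for index '9' if the second (commented-out) node were enabled.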
######################### do a regression fit ########################
with open('v100_data/x1_data.json') as f:
x1_v100 = json.load(f)
with open('v100_data/x2_data.json') as f:
x2_v100 = json.load(f)
with open('v100_data/x3_data.json') as f:
x3_v100 = json.load(f)
x1_norm = [(i - min(x1_v100)) / (max(x1_v100) - min(x1_v100)) for i in x1_v100]
x2_norm = [(i - min(x2_v100)) / (max(x2_v100) - min(x2_v100)) for i in x2_v100]
x3_norm = [(i - min(x3_v100)) / (max(x3_v100) - min(x3_v100)) for i in x3_v100]
# create training data
x_train = []
for i in range(len(x1_norm)):
x_train.append([x1_norm[i], x2_norm[i], x3_norm[i]])
with open('v100_data/y_data.json') as f:
y_train = json.load(f)
model_V100 = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance')
model_V100.fit(x_train, y_train)
with open('k80_data/x1_data.json') as f:
x1_k80 = json.load(f)
with open('k80_data/x2_data.json') as f:
x2_k80 = json.load(f)
with open('k80_data/x3_data.json') as f:
x3_k80 = json.load(f)
x1_norm = [(i - min(x1_k80)) / (max(x1_k80) - min(x1_k80)) for i in x1_k80]
x2_norm = [(i - min(x2_k80)) / (max(x2_k80) - min(x2_k80)) for i in x2_k80]
x3_norm = [(i - min(x3_k80)) / (max(x3_k80) - min(x3_k80)) for i in x3_k80]
# create training data
x_train = []
for i in range(len(x1_norm)):
x_train.append([x1_norm[i], x2_norm[i], x3_norm[i]])
with open('k80_data/y_data.json') as f:
y_train = json.load(f)
model_K80 = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance')
model_K80.fit(x_train, y_train)
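# Sketch of a prediction query (this mirrors the normalization applied before the
# model_K80.predict()/model_V100.predict() calls further below; x1, x2, x3 are one
# job's raw features):
# x1n = (x1 - min(x1_k80)) / (max(x1_k80) - min(x1_k80))  # likewise for x2n, x3n
# speedup = model_K80.predict(np.array([x1n, x2n, x3n]).reshape(1, -1))[0] / 100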
####################################################################
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address), file=run_log, flush=True)
sock.connect(server_address)
try:
# Send data
        message = cmd.encode('utf-8') # e.g. b'save 35' or b'start 35 gpu 6'
print('sending {!r}'.format(message), file=run_log, flush=True)
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
# print('received {!r}'.format(data))
break
else:
print('waiting for success signal', file=run_log, flush=True)
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always force demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = demote_list
#if 'idle' in V100_qual:
# V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if num_promote <= V100_vacant: # promote all jobs as well
return promote_list[:], force_demote[:]
        else: # promote only as many jobs as there are V100 slots
pool_dict = {}
V100_avail = V100_vacant + len(V100_qual)
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(demote_list).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
            # lazy migration: for every V100 job not in sorted_pool, from high speedup to low,
            # compare it with K80 jobs in sorted_pool, from low speedup to high; if the speedup
            # difference is within 0.05, keep both jobs in place instead of swapping them
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.05:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
# situations below won't happen
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
            raise ValueError('Bug: demotion should not happen because no job is practically complete')
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
    time.sleep(3) # in case epoch_waste is communicated too frequently
def kill_job(node, job): # kill_job('c2176', '50')
send_signal(node, 'kill ' + job)
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch
# in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs.
def check_step1_complete(job_list, node):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global V100_epoch_time
global K80_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
if node in V100_node:
V100_epoch_time[job] = wall_time[1] - wall_time[0]
elif node in K80_node:
K80_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete', file=run_log, flush=True)
except Exception:
pass
def check_step2_complete(job_list, node):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global V100_epoch_time
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
if node in K80_node:
K80_epoch_time[job] = wall_time[1] - wall_time[0]
V100_time_step2 = V100_epoch_time[job]
K80_time_step2 = wall_time[1] - wall_time[0]
elif node in V100_node:
V100_epoch_time[job] = wall_time[1] - wall_time[0]
K80_time_step2 = K80_epoch_time[job]
V100_time_step2 = wall_time[1] - wall_time[0]
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete', file=run_log, flush=True)
except Exception:
pass
# measure job
def measure_job(node, gpu, job):
cmd = 'measure ' + job + ' gpu ' + gpu
send_signal(node, cmd)
############### first clear finish status of all jobs ####################
# per-job status dicts, keyed by 'job<N>'
job_names = ['job' + str(i + 1) for i in range(len(queue))]
pid_dict = {name: 0 for name in job_names}
checkpoint_dict = {name: 0 for name in job_names}
ckpt_qual_dict = {name: 0 for name in job_names}
finish_dict = {name: 0 for name in job_names}
epoch_waste_dict = {name: 0 for name in job_names}
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address), file=run_log, flush=True)
sock.bind(server_address)
sock.listen(5)
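    # messages arrive as space-separated ASCII strings, e.g. 'job50 pid 10000',
    # 'job50 checkpoint' or 'job50 completion 0.33'; each one is acknowledged with b'success'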
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
#if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
# print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job, file=run_log, flush=True)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job, file=run_log, flush=True)
if job in demote_list:
demote_list.remove(job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(V100_job.values()), V100_node[0])
check_step2_complete(list(K80_job.values()), K80_node[0])
for gpu, job in V100_job.items():
if job not in qualified_job and job != 'idle':
if job in step1_job:
real_node, real_gpu = V100_LUT(gpu)
kill_job(real_node, job)
qualified_job.append(job)
print('job ' + job + ' has been qualified for demotion to K80', file=run_log, flush=True)
time.sleep(3) # wait for run.sh to finish
x1, x3 = gpu_pwr.process_csv('job'+job, testcase)
x2 = 3600 / V100_epoch_time[job] # num of epochs per hr
# preprocess the data
x1 = (x1 - min(x1_v100)) / (max(x1_v100) - min(x1_v100))
x2 = (x2 - min(x2_v100)) / (max(x2_v100) - min(x2_v100))
x3 = (x3 - min(x3_v100)) / (max(x3_v100) - min(x3_v100))
speedup_pred = model_V100.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
check_step1_complete(list(K80_job.values()), K80_node[0])
check_step2_complete(list(V100_job.values()), V100_node[0])
for gpu, job in K80_job.items():
if job not in qualified_job and job != 'idle':
if job in step1_job:
real_node, real_gpu = K80_LUT(gpu)
kill_job(real_node, job)
qualified_job.append(job)
print('job ' + job + ' has been qualified for promotion to V100', file=run_log, flush=True)
time.sleep(3) # wait for run.sh to finish
x1, x3 = gpu_pwr.process_csv('job'+job, testcase)
x2 = 3600 / K80_epoch_time[job]
# preprocess the data
x1 = (x1 - min(x1_k80)) / (max(x1_k80) - min(x1_k80))
x2 = (x2 - min(x2_k80)) / (max(x2_k80) - min(x2_k80))
x3 = (x3 - min(x3_k80)) / (max(x3_k80) - min(x3_k80))
speedup_pred = model_K80.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
############### start new jobs on idle K80s and V100s before promoting K80 jobs to idle V100 ################
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
real_node, real_gpu = V100_LUT(gpu)
start_job(real_node, real_gpu, job_new)
birthplace[job_new] = real_node
measure_job(real_node, real_gpu, job_new)
V100_job[gpu] = job_new
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
real_node, real_gpu = K80_LUT(gpu)
start_job(real_node, real_gpu, job_new)
birthplace[job_new] = real_node
measure_job(real_node, real_gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
K80_start_time[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
promote_list = [] #list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
for gpu, job in K80_job.items():
if job != 'idle':
if job in step2_job and len(ovhd_total[job]) > 0:
promote_list.append(job)
elif job not in step2_job and job in qualified_job and birthplace[job] in K80_node:
promote_list.append(job)
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
# for jobs who have finished profiling, added the job
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = speedup_dict[job] # 0.7
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job]
v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job]
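                # heuristic: keep the job on the V100 until it has amortized two migration
                # overheads plus both first-epoch penalties at its measured speedup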
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
if int(time.time() - promote_start_time[job]) > max(demote_qualify_time, max(v100_1st[job])):
demote_list.append(job)
                    print('job' + job + ' qualified for demote for passing demote qualify time ' +
                    str(int(demote_qualify_time)), file=run_log, flush=True)
# for jobs who have not finished profiling, add the job if it's qualified and it started on V100
elif job not in demote_list and job not in step2_job and job in qualified_job and birthplace[job] in V100_node:
demote_list.append(job)
                print('job' + job + ' qualified for demote for profiling', file=run_log, flush=True)
if len(promote_list) > 0 or len(demote_list) > 0:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted, file=run_log, flush=True)
if len(demoted) > 0:
print('demoted jobs: ', demoted, file=run_log, flush=True)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
# make sure promoted step1 job doesn't get demoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 1
real_node, real_gpu = K80_LUT(gpu)
save_job(real_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
# make sure demoted step1 job doesn't get promoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 0.01
real_node, real_gpu = V100_LUT(gpu)
save_job(real_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully', file=run_log, flush=True)
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal', file=run_log, flush=True)
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# give it some time to cleanup old checkpointed jobs
time.sleep(3)
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
real_node, real_gpu = V100_LUT(gpu)
resume_job(real_node, real_gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
real_node, real_gpu = K80_LUT(gpu)
resume_job(real_node, real_gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
print('job'+job_new+' has finished before checkpointing', file=run_log, flush=True)
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!', file=run_log, flush=True)
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
average_queue_delay = np.average(list(queue_delay.values()))
queue_delay['average'] = average_queue_delay
# after everything is finished
print('finished all runs', file=run_log, flush=True)
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
demote_list_name = 'demote_list.json'
completion_name = 'completion.json'
queue_delay_name = testcase + '_queue_delay.json'
birthplace_name = testcase + '_birthplace.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
with open(queue_delay_name, 'w') as fp1:
json.dump(queue_delay, fp1, sort_keys=True, indent=4)
with open(birthplace_name, 'w') as fp1:
json.dump(birthplace, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
"""
brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
author: lzhbrian (https://lzhbrian.me)
date: 2020.1.5
note: code is heavily borrowed from
https://github.com/NVlabs/ffhq-dataset
http://dlib.net/face_landmark_detection.py.html
requirements:
apt install cmake
conda install Pillow numpy scipy
pip install dlib
# download face landmark model from:
# http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
"""
import PIL
import PIL.Image
import os
import scipy
import scipy.ndimage
import dlib
import cv2
import numpy as np
from PIL import Image
import torchvision.transforms.functional as fn
from argparse import ArgumentParser
# download model from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
predictor = dlib.shape_predictor('./resources/shape_predictor_68_face_landmarks.dat')
def get_landmark(filepath):
"""get landmark with dlib
:return: np.array shape=(68, 2)
"""
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image(filepath)
dets = detector(img, 1)
print("Number of faces detected: {}".format(len(dets)))
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
# Get the landmarks/parts for the face in box d.
shape = predictor(img, d)
print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
t = list(shape.parts())
a = []
for tt in t:
a.append([tt.x, tt.y])
lm = np.array(a)
# lm is a shape=(68,2) np.array
return lm
def align_face(filepath):
"""
:param filepath: str
:return: PIL Image
"""
lm = get_landmark(filepath)
lm_chin = lm[0 : 17] # left-right
lm_eyebrow_left = lm[17 : 22] # left-right
lm_eyebrow_right = lm[22 : 27] # left-right
lm_nose = lm[27 : 31] # top-down
lm_nostrils = lm[31 : 36] # top-down
lm_eye_left = lm[36 : 42] # left-clockwise
lm_eye_right = lm[42 : 48] # left-clockwise
lm_mouth_outer = lm[48 : 60] # left-clockwise
lm_mouth_inner = lm[60 : 68] # left-clockwise
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
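    # x spans the rectangle horizontally (the eye axis blended with the mouth axis),
    # y is x rotated by 90 degrees, and the center c sits slightly below the eye midpoint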
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
# read image
img = PIL.Image.open(filepath)
output_size=1024
transform_size=4096
enable_padding=True
# Shrink.
shrink = int(np.floor(qsize / 1024 * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
# Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
# Save aligned image.
return img
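# Minimal usage sketch (hypothetical paths; requires the dlib landmark model above):
# aligned = align_face('input/face.jpg')
# aligned.save('output/face_aligned.png')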
def get_landmark_npy(img):
"""get landmark with dlib
:return: np.array shape=(68, 2)
"""
detector = dlib.get_frontal_face_detector()
dets = detector(img, 1)
if len(dets) == 0:
raise RuntimeError("No faces found")
print("Number of faces detected: {}".format(len(dets)))
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
# Get the landmarks/parts for the face in box d.
shape = predictor(img, d)
print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
t = list(shape.parts())
a = []
for tt in t:
a.append([tt.x, tt.y])
lm = np.array(a)
# lm is a shape=(68,2) np.array
return lm
def align_face_npy(img, output_size=1024):
lm = get_landmark_npy(img)
lm_chin = lm[0 : 17] # left-right
lm_eyebrow_left = lm[17 : 22] # left-right
lm_eyebrow_right = lm[22 : 27] # left-right
lm_nose = lm[27 : 31] # top-down
lm_nostrils = lm[31 : 36] # top-down
lm_eye_left = lm[36 : 42] # left-clockwise
lm_eye_right = lm[42 : 48] # left-clockwise
lm_mouth_outer = lm[48 : 60] # left-clockwise
lm_mouth_inner = lm[60 : 68] # left-clockwise
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
img = Image.fromarray(img)
transform_size=4096
enable_padding=True
# Shrink.
shrink = int(np.floor(qsize / 1024 * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
# Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
# Save aligned image.
return np.array(img)
def align_face_npy_with_params(img, output_size=1024):
lm = get_landmark_npy(img)
lm_chin = lm[0 : 17] # left-right
lm_eyebrow_left = lm[17 : 22] # left-right
lm_eyebrow_right = lm[22 : 27] # left-right
lm_nose = lm[27 : 31] # top-down
lm_nostrils = lm[31 : 36] # top-down
lm_eye_left = lm[36 : 42] # left-clockwise
lm_eye_right = lm[42 : 48] # left-clockwise
lm_mouth_outer = lm[48 : 60] # left-clockwise
lm_mouth_inner = lm[60 : 68] # left-clockwise
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
img = Image.fromarray(img)
transform_size=4096
enable_padding=True
# Shrink.
shrink = int(np.floor(qsize / 1024 * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
shrunk_image = img
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
actual_crop = (0, 0, 0, 0)
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
actual_crop = crop
img = img.crop(crop)
quad -= crop[0:2]
# # Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
actual_padding = (0, 0, 0, 0)
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
actual_padding = pad
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
padded_img = img
# # Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
# Save aligned image.
return np.array(img), [shrink, actual_crop, actual_padding, quad, padded_img, shrunk_image]
def unalign_face_npy(aligned_image, alignment_params):
# Shrinking of the original image means that the face was too large to be represented
# in the output size anyway, so it doesn't make sense to reverse it.
shrink, crop, padding, quad, padded_img, shrunk_image = alignment_params
def build_perspective(srcpts, dstpts):
srcpts = np.array(srcpts)
dstpts = np.array(dstpts)
A = \
[
# x1
[srcpts[0, 0], srcpts[0, 1], 1, 0, 0, 0, -srcpts[0, 0] * dstpts[0, 0], -srcpts[0, 1] * dstpts[0, 0]],
[0, 0, 0, srcpts[0, 0], srcpts[0, 1], 1, -srcpts[0, 0] * dstpts[0, 1], -srcpts[0, 1] * dstpts[0, 1]],
# x2
[srcpts[1, 0], srcpts[1, 1], 1, 0, 0, 0, -srcpts[1, 0] * dstpts[1, 0], -srcpts[1, 1] * dstpts[1, 0]],
[0, 0, 0, srcpts[1, 0], srcpts[1, 1], 1, -srcpts[1, 0] * dstpts[1, 1], -srcpts[1, 1] * dstpts[1, 1]],
# x3
[srcpts[2, 0], srcpts[2, 1], 1, 0, 0, 0, -srcpts[2, 0] * dstpts[2, 0], -srcpts[2, 1] * dstpts[2, 0]],
[0, 0, 0, srcpts[2, 0], srcpts[2, 1], 1, -srcpts[2, 0] * dstpts[2, 1], -srcpts[2, 1] * dstpts[2, 1]],
# x4
[srcpts[3, 0], srcpts[3, 1], 1, 0, 0, 0, -srcpts[3, 0] * dstpts[3, 0], -srcpts[3, 1] * dstpts[3, 0]],
[0, 0, 0, srcpts[3, 0], srcpts[3, 1], 1, -srcpts[3, 0] * dstpts[3, 1], -srcpts[3, 1] * dstpts[3, 1]],
]
b = [dstpts[0, 0], dstpts[0, 1], dstpts[1, 0], dstpts[1, 1], dstpts[2, 0], dstpts[2, 1], dstpts[3, 0], dstpts[3, 1]]
coeffs = np.linalg.solve(np.array(A), np.array(b))
xform = \
[
[coeffs[0], coeffs[1], coeffs[2]],
[coeffs[3], coeffs[4], coeffs[5]],
[coeffs[6], coeffs[7], 1]
]
return np.array(xform)
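    # build_perspective solves the standard 8-unknown homography system A.h = b from
    # four point correspondences; its inverse below maps the aligned 1024x1024 square
    # back onto the original (padded) quad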
# Transform back to the unaligned quad.
c = build_perspective(
[[0, 0], [0, 1024], [1024, 1024], [1024, 0]],
quad + 0.5,
)
c = np.linalg.inv(c)
    # Upscale back to 1024x1024 with torchvision
aligned_pil = PIL.Image.fromarray(aligned_image)
aligned_pil = fn.resize(aligned_pil, size=[1024])
fill_mask = PIL.Image.fromarray(np.ones_like(aligned_pil, dtype=np.uint8) * 255)
# Inverse to `unaligned = aligned_pil.transform((1024, 1024), PIL.Image.PERSPECTIVE, c.reshape(9)[0:8], Image.BICUBIC)``
unaligned = aligned_pil.transform(
(padded_img.width, padded_img.height),
Image.PERSPECTIVE, c.reshape(9)[0:8], Image.BICUBIC
)
unaligned_mask = fill_mask.transform(
(padded_img.width, padded_img.height),
Image.PERSPECTIVE, c.reshape(9)[0:8], Image.BICUBIC
)
# "Unpad"
unaligned = np.array(unaligned)[padding[1]:unaligned.height-padding[3], padding[0]:unaligned.width-padding[2], :]
unaligned_mask = np.array(unaligned_mask)[padding[1]:unaligned_mask.height-padding[3], padding[0]:unaligned_mask.width-padding[2], :]
# Ideally get rid of the blur added with padding, but that's not as trivial..
# Uncrop.
    canvas = np.empty((shrunk_image.height, shrunk_image.width, unaligned.shape[2]), dtype=unaligned.dtype)
'''
Vocoder classes to parametrize/deparametrize a waveform.
This should be seen and developped as a completely independent module.
(e.g independent of PercivalTTS and any ML backend)
Copyright(C) 2017 Engineering Department, University of Cambridge, UK.
License
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author
<NAME> <<EMAIL>>
'''
import os
import numpy as np
from external.pulsemodel import sigproc as sp
from external import pulsemodel

def makedirs(path):
    # mkdir -p helper used below (the original project imports an equivalent from its own utilities)
    if path and not os.path.isdir(path):
        os.makedirs(path)
class Vocoder:
_name = None
shift = None
fs = None
mlpg_wins = None
def __init__(self, name, fs, shift, mlpg_wins=None):
self._name = name
self.fs = fs
self.shift = shift
self.mlpg_wins = mlpg_wins
def preprocwav(self, wav, fs, highpass=None):
'''
Should always be called at the beginning of the analysis function accessing the waveform.
'''
if fs!=self.fs:
print(' Resampling the waveform (new fs={}Hz)'.format(self.fs))
wav = sp.resample(wav, fs, self.fs, method=2, deterministic=True)
fs = self.fs
if not highpass is None:
            print(' High-pass filter the waveform (cut-off={}Hz)'.format(highpass))
from scipy import signal as sig
            b, a = sig.butter(4, highpass/(self.fs*0.5), btype='high') # cut-off normalized by the Nyquist frequency
wav = sig.filtfilt(b, a, wav)
wav = np.ascontiguousarray(wav) # Often necessary for some cython implementations
return wav
# if pp_spec_extrapfreq>0:
# idxlim = int(dftlen*pp_spec_extrapfreq/self.fs)
# for n in xrange(SPEC.shape[0]):
# SPEC[n,idxlim:] = SPEC[n,idxlim]
#
# if pp_spec_pf_coef>0:
# # A fast version of formant enhancer
# for n in xrange(SPEC.shape[0]):
# #if n*0.005<1.085: continue
# # Post-processing similar to Merlin's
# # But really NOT equivalent
# # This one creates way more low-pass effect with same coef (1.4)
# cc = np.fft.irfft(np.log(abs(SPEC[n,:])))
# cc = cc[:int(dftlen/2)+1]
# cc[1:] = 2.0*cc[1:]
# cc[2:] = pp_spec_pf_coef*cc[2:]
# spec_pp = abs(np.exp(np.fft.rfft(cc, dftlen)))
# if 0:
# import matplotlib.pyplot as plt
# plt.ion()
# plt.clf()
# FF = self.fs*np.arange(dftlen/2+1)/dftlen
# plt.plot(FF, sp.mag2db(SPEC[n,:]), 'k')
# plt.plot(FF, sp.mag2db(spec_pp), 'b')
# from IPython.core.debugger import Pdb; Pdb().set_trace()
# SPEC[n,:] = spec_pp
def __str__(self):
return '{} (fs={}, shift={})'.format(self.name(), self.fs, self.shift)
def name(self): return self._name
def featuressizeraw(self):
'''
This is the size of the acoustic feature vector, without deltas for MLPG
'''
raise ValueError('This member function has to be re-implemented in the sub-classes') # pragma: no cover
def featuressize(self):
if not self.mlpg_wins is None: return self.featuressizeraw()*(len(self.mlpg_wins)+1)
else: return self.featuressizeraw()
def f0size(self): return -1
def specsize(self): return -1
def noisesize(self): return -1
def vuvsize(self): return -1
# Please add any other potential feature here, while respecting the expected order
# Objective measures member functions for any vocoder
features_err = dict()
def objmeasures_clear(self): self.features_err=dict()
def objmeasures_stats(self):
for key in self.features_err:
print('{}: {}'.format(key, np.mean(np.vstack(self.features_err[key]))))
class VocoderF0Spec(Vocoder):
spec_type = None
spec_size = None
dftlen = 4096
def __init__(self, name, fs, shift, spec_size, spec_type='fwbnd', dftlen=4096, mlpg_wins=None):
Vocoder.__init__(self, name, fs, shift, mlpg_wins=mlpg_wins)
self.spec_size = spec_size
self.spec_type = spec_type # 'fwbnd' 'mcep'
self.dftlen = dftlen
def f0size(self): return 1
def specsize(self): return self.spec_size
# Utility functions for this class of vocoder
def compress_spectrum(self, SPEC, spec_type, spec_size):
dftlen = (SPEC.shape[1]-1)*2
if self.spec_type=='fwbnd':
COMPSPEC = sp.linbnd2fwbnd(np.log(abs(SPEC)), self.fs, dftlen, spec_size)
elif self.spec_type=='mcep': # pragma: no cover Need SPTK to test this
# TODO test
COMPSPEC = sp.spec2mcep(SPEC*self.fs, sp.bark_alpha(self.fs), spec_size-1)
return COMPSPEC
def decompress_spectrum(self, COMPSPEC, spec_type, pp_mcep=False):
if self.spec_type=='fwbnd':
SPEC = np.exp(sp.fwbnd2linbnd(COMPSPEC, self.fs, self.dftlen, smooth=True))
if pp_mcep: # pragma: no cover Would need SPTK to test it
print(' Merlin/SPTK Post-proc on MCEP')
import external.merlin.generate_pp
mcep = sp.spec2mcep(SPEC*self.fs, sp.bark_alpha(self.fs), 256) # Arbitrary high order
mcep_pp = external.merlin.generate_pp.mcep_postproc_sptk(mcep, self.fs, dftlen=self.dftlen) # Apply Merlin's post-proc on spec env
SPEC = sp.mcep2spec(mcep_pp, sp.bark_alpha(self.fs), dftlen=self.dftlen)/self.fs
elif self.spec_type=='mcep':# pragma: no cover Would need SPTK to test it
# TODO test
if pp_mcep:
print(' Merlin/SPTK Post-proc on MCEP')
import external.merlin.generate_pp
COMPSPEC = external.merlin.generate_pp.mcep_postproc_sptk(COMPSPEC, self.fs, dftlen=self.dftlen) # Apply Merlin's post-proc on spec env
SPEC = sp.mcep2spec(COMPSPEC, sp.bark_alpha(self.fs), dftlen=self.dftlen)
return SPEC
class VocoderPML(VocoderF0Spec):
nm_size = None
def __init__(self, fs, shift, spec_size, nm_size, dftlen=4096, mlpg_wins=None):
VocoderF0Spec.__init__(self, 'PML', fs, shift, spec_size, 'fwbnd', dftlen, mlpg_wins=mlpg_wins)
self.nm_size = nm_size
def featuressizeraw(self):
return 1+self.spec_size+self.nm_size
def noisesize(self): return self.nm_size
def analysisf(self, fwav, ff0, f0_min, f0_max, fspec, fnm, **kwargs):
print('Extracting PML features from: '+fwav)
if ('preproc_hp' in kwargs) and (kwargs['preproc_hp']=='auto'):
kwargs['preproc_hp']=f0_min
# through args `preproc_fs` and `preproc_hp` pulsemodel.analysisf takes care of self.preprocwav
pulsemodel.analysisf(fwav, shift=self.shift, f0estimator='REAPER', f0_min=f0_min, f0_max=f0_max, ff0=ff0, f0_log=True, fspec=fspec, spec_nbfwbnds=self.spec_size, fnm=fnm, nm_nbfwbnds=self.nm_size, preproc_fs=self.fs, **kwargs)
def analysisfid(self, fid, wav_path, f0_min, f0_max, outputpathdicts, **kwargs): # pragma: no cover coverage not detected
return self.analysisf(wav_path.replace('*',fid), outputpathdicts['f0'].replace('*',fid), f0_min, f0_max, outputpathdicts['spec'].replace('*',fid), outputpathdicts['noise'].replace('*',fid), **kwargs)
def synthesis(self, CMP, pp_mcep=False, pp_f0_smooth=None):
f0 = CMP[:,0]
f0 = np.exp(f0)
SPEC = self.decompress_spectrum(CMP[:,1:1+self.spec_size], self.spec_type, pp_mcep=pp_mcep)
NM = CMP[:,1+self.spec_size:1+self.spec_size+self.nm_size]
NM = sp.fwbnd2linbnd(NM, self.fs, self.dftlen)
syn = pulsemodel.synthesis.synthesize(self.fs, np.vstack((self.shift*np.arange(len(f0)), f0)).T, SPEC, NM=NM, nm_cont=False, pp_atten1stharminsilences=-25, pp_f0_smooth=pp_f0_smooth)
return syn
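    # Minimal usage sketch (hypothetical file names and feature sizes):
    # voc = VocoderPML(fs=16000, shift=0.005, spec_size=65, nm_size=17)
    # voc.analysisf('a.wav', 'a.f0', 60, 600, 'a.spec', 'a.nm')
    # syn = voc.synthesis(CMP) # CMP rows: [log-f0 | fwbnd spectrum | fwbnd noise mask]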
# Objective measures
def objmeasures_add(self, CMP, REF):
f0trg = np.exp(REF[:,0])
f0gen = np.exp(CMP[:,0])
self.features_err.setdefault('F0[Hz]', []).append(np.sqrt(np.mean((f0trg-f0gen)**2)))
spectrg = sp.log2db(REF[:,1:1+self.spec_size])
specgen = sp.log2db(CMP[:,1:1+self.spec_size])
self.features_err.setdefault('SPEC[dB]', []).append(np.sqrt(np.mean((spectrg-specgen)**2, 0)))
nmtrg = REF[:,1+self.spec_size:1+self.spec_size+self.nm_size]
nmgen = CMP[:,1+self.spec_size:1+self.spec_size+self.nm_size]
self.features_err.setdefault('NM', []).append(np.sqrt(np.mean((nmtrg-nmgen)**2, 0)))
class VocoderWORLD(VocoderF0Spec):
aper_size = None
def __init__(self, fs, shift, spec_size, aper_size, dftlen=4096, mlpg_wins=None):
VocoderF0Spec.__init__(self, 'WORLD', fs, shift, spec_size, 'fwbnd', dftlen, mlpg_wins=mlpg_wins)
self.aper_size = aper_size
def featuressizeraw(self):
return 1+self.spec_size+self.aper_size+1
def noisesize(self): return self.aper_size
def vuvsize(self): return 1
def analysisf(self, fwav, ff0, f0_min, f0_max, fspec, faper, fvuv, **kwargs):
print('Extracting WORLD features from: '+fwav)
wav, fs, _ = sp.wavread(fwav)
        if ('preproc_hp' in kwargs):
            if kwargs['preproc_hp']=='auto': kwargs['preproc_hp']=f0_min
            wav = self.preprocwav(wav, fs, highpass=kwargs['preproc_hp'])
        else:
            wav = self.preprocwav(wav, fs)
import pyworld as pw
if 0:
# Check direct copy re-synthesis without compression/encoding
print(pw.__file__)
# _f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max, channels_in_octave=2, frame_period=self.shift*1000.0)
_f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max, channels_in_octave=2, frame_period=self.shift*1000.0)
# _f0, ts = pw.harvest(wav, fs)
f0 = pw.stonemask(wav, _f0, ts, fs)
SPEC = pw.cheaptrick(wav, f0, ts, fs, fft_size=self.dftlen)
APER = pw.d4c(wav, f0, ts, fs, fft_size=self.dftlen)
resyn = pw.synthesize(f0.astype('float64'), SPEC.astype('float64'), APER.astype('float64'), fs, self.shift*1000.0)
sp.wavwrite('resynth.wav', resyn, fs, norm_abs=True, force_norm_abs=True, verbose=1)
from IPython.core.debugger import Pdb; Pdb().set_trace()
_f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max, channels_in_octave=2, frame_period=self.shift*1000.0)
f0 = pw.stonemask(wav, _f0, ts, fs)
SPEC = pw.cheaptrick(wav, f0, ts, fs, fft_size=self.dftlen)
# SPEC = 10.0*np.sqrt(SPEC) # TODO Best gain correction I could find. Hard to find the good one between PML and WORLD different syntheses
APER = pw.d4c(wav, f0, ts, fs, fft_size=self.dftlen)
unvoiced = np.where(f0<20)[0]
f0 = np.interp(ts, ts[f0>0], f0[f0>0])
f0 = np.log(f0)
makedirs(os.path.dirname(ff0))
f0.astype('float32').tofile(ff0)
vuv = np.ones(len(f0))
vuv[unvoiced] = 0
makedirs(os.path.dirname(fvuv))
vuv.astype('float32').tofile(fvuv)
SPEC = self.compress_spectrum(SPEC, fs, self.spec_size)
makedirs(os.path.dirname(fspec))
SPEC.astype('float32').tofile(fspec)
APER = sp.linbnd2fwbnd(APER, fs, self.dftlen, self.aper_size)
APER = sp.mag2db(APER)
makedirs(os.path.dirname(faper))
APER.astype('float32').tofile(faper)
# CMP = np.concatenate((f0.reshape((-1,1)), SPEC, APER, vuv.reshape((-1,1))), axis=1) # (This is not a necessity)
if 0:
import matplotlib.pyplot as plt
plt.ion()
resyn = self.synthesis(CMP)
sp.wavwrite('resynth.wav', resyn, fs, norm_abs=True, force_norm_abs=True, verbose=1)
from IPython.core.debugger import Pdb; Pdb().set_trace()
# return CMP
def analysisfid(self, fid, wav_path, f0_min, f0_max, outputpathdicts, **kwargs): # pragma: no cover coverage not detected
return self.analysisf(wav_path.replace('*',fid), outputpathdicts['f0'].replace('*',fid), f0_min, f0_max, outputpathdicts['spec'].replace('*',fid), outputpathdicts['noise'].replace('*',fid), outputpathdicts['vuv'].replace('*',fid), **kwargs)
def synthesis(self, CMP, pp_mcep=False, pp_f0_smooth=None):
if not pp_f0_smooth is None: raise ValueError('VocoderWORLD synthesis does not include an f0 smoother, please use `pp_f0_smooth=None`')
import pyworld as pw
f0 = CMP[:,0]
        f0 = np.exp(f0)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # computed up front so the success branch can report it
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist, variables):
    """
    Download the data for each requested variable from the THREDDS/OPeNDAP
    server and append it to the corresponding entry in the variables list.

    :param nclist: list of NetCDF file paths returned by M2M_Files
    :param variables: structtype of var objects whose .name fields select the
        NetCDF variables to load
    :return: the populated variables and the time axis converted to datetimes
    """
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may contain more than one URL; loop over all of them
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the leading 'catalog.html?dataset=ooi/' (25 characters) from the THREDDS href
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # convert the time axis from seconds since 1900-01-01 to datetimes
    tmp = variables[0].data / 60 / 60 / 24  # seconds -> days
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
    def __init__(self):
        """A class that imitates a Matlab structure array: indexing one
        position past the current end appends a new, empty var()."""
        self._data = []
    def __getitem__(self, index):
        """Implement index behavior in the struct; the container grows by one
        when index equals the current length (larger indices raise IndexError)."""
        if index == len(self._data):
            self._data.append(var())
        return self._data[index]
    def __len__(self):
        return len(self._data)
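
# End-to-end usage sketch tying the pieces together. The date range is a
# placeholder, and M2M_URLs is assumed to return the (uframe_dataset_name,
# var_list) pair it builds; its return statement is not shown in this excerpt.
# M2M_URLs being defined below is fine, since Python resolves the name at call
# time rather than at definition time.
def _example_pipeline():  # pragma: no cover
    uframe_dataset_name, var_list = M2M_URLs('CE02SHSM', 'BUOY', 'METBK1', 'Telemetered')
    data = M2M_Call(uframe_dataset_name, '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')
    if data is None:
        return None
    files = M2M_Files(data, tag='.*METBK.*\\.nc$')
    variables, time_converted = M2M_Data(files, var_list)
    print(variables[1])  # e.g. name, units, and data size of sea_surface_temperature
    return variables, time_converted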
def M2M_URLs(platform_name, node, instrument_class, method):
    """Map a (platform_name, node, instrument_class, method) combination to the
    corresponding uFrame dataset name and an initialized var_list describing the
    variables of interest (names, empty data arrays, and units)."""
    var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
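# PCO2W: partial pressure of CO2 in seawater (water-side pCO2 sensor)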
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
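# PHSEN: seawater pH sensor (thermistor temperature plus pH)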
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
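# SPKIR: downwelling spectral irradiance (multispectral radiometer)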
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
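# PRESF: seafloor pressure recorder (tide measurements)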
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
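# CTDBP: moored CTD (conductivity, temperature, pressure, plus derived salinity and density)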
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
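# VEL3D: 3-D single-point turbulent velocity meter (MFN-mounted)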
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
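# VEL3DK: 3-D point velocity meter (series K) on the wire-following profiler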
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
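# CTDPFK: wire-following profiler CTD (grouped here with the profiler instruments)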
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
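# PCO2A: air-sea pCO2 (surface seawater and atmospheric CO2 partial pressures, plus derived flux)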
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
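# PARAD: photosynthetically available radiation (PAR) on the profiler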
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
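# OPTAA: optical absorption and attenuation meter (only the time base is requested here)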
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
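# NUTNR: nitrate concentration (SUNA); note the telemetered stream name ends in '_recovered'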
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## Recovered-host data streams (method == 'RecoveredHost') follow
#MOPAK
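# MOPAK: buoy 3-axis motion package (accelerometer; only the time base is requested here)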
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
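# METBK: bulk meteorology package (sea surface and near-surface atmospheric observations)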
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
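# FLORT: three-channel fluorometer (chlorophyll-a, CDOM, optical backscatter)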
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
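# FDCHP: direct covariance air-sea flux package (only the time base is requested here)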
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
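# DOSTA: dissolved oxygen optode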
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
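# ADCP: Acoustic Doppler Current Profilers reporting earth-referenced
# velocities (adcp_velocity_earth). The same eight variables are requested at
# every mooring; only the reference designator (ADCPTA/ADCPTC/ADCPTM/ADCPSJ)
# varies by site.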
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
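# WAVSS_Stats: bulk wave statistics from the surface-buoy wave sensor
# (wavss_a_dcl_statistics_recovered); identical 15-variable blocks for the
# four surface moorings that carry the instrument.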
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
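# VELPT: single-point velocity meters. The BUOY and NSIF branches request the
# same nine raw variables; note that heading/pitch/roll, temperature, and
# pressure are reported in instrument counts (deci-degrees, 0.01degC,
# 0.001dbar) rather than SI units.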
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
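# Note: this BUOY branch points at the RID16 (NSIF) VELPT location and uses
# the non-DCL stream; the DCL stream is kept below as a commented-out
# alternative.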
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
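# PCO2W: seawater pCO2 sensors; each branch requests thermistor temperature
# and pCO2 of seawater from the same three-variable stream.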
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
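# PHSEN: seawater pH sensors; thermistor temperature plus the computed pH
# (unitless) are requested for every NSIF and MFN deployment.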
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
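# SPKIR: multispectral downwelling irradiance; the single data variable is
# the multi-channel downwelling vector in uW cm-2 nm-1.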
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
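# PRESF: seafloor pressure (tide) recorders on the MFN frames; absolute
# seafloor pressure and in-situ temperature from the tide-measurement stream.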
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
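# CTDBP: pumped CTDs. The dispatch key used here is the generic
# instrument_class == 'CTD'; all branches request the same six-variable
# record (temperature, salinity, density, pressure, conductivity).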
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
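# VEL3D: 3-D single-point turbulent velocity meters on the MFN frames;
# east/north/up turbulent velocities plus seawater pressure (0.001dbar).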
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A -- surface-buoy air-sea pCO2, recovered_host: seawater and atmosphere partial pressures plus the computed CO2 flux
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA -- optical absorption/attenuation meters, recovered_host: only the time coordinate is requested for these streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR -- SUNA nitrate, recovered_host: raw and salinity-corrected concentrations
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
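#CTD -- recovered_inst CTDBP streams (temperature, salinity, density, pressure, conductivity)
# NOTE: each CTDBP branch below fills the same six slots with identical units.
# A table-driven helper could collapse the repetition. Minimal sketch only --
# 'fill_vars' and 'CTD_UNITS' are hypothetical names, not part of this module:
#
#   CTD_UNITS = ['seconds since 1900-01-01', 'degC', 'unitless',
#                'kg/m3', 'dbar', 'S/m']
#
#   def fill_vars(var_list, names, units):
#       for i, (name, unit) in enumerate(zip(names, units)):
#           var_list[i].name = name
#           var_list[i].data = np.array([])  # populated later by the data request
#           var_list[i].units = unit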
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
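# CE09OSPM wire-following profiler CTD (recovered_wfp)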
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
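#ADCP -- velocity profiles in earth coordinates, recovered_inst
# bin_depths is typically two-dimensional (time x depth bin), unlike the
# scalar attitude fields; downstream code should not assume it is 1-D.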
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
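#ZPLSC -- bioacoustic sonar echogram streams, recovered_inst: only the time coordinate is requested here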
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
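#VELPT -- single-point velocity meters (buoy and NSIF), recovered_inst
# The attitude, temperature, and pressure fields are integer-scaled, as the
# unit strings below indicate. A hedged conversion sketch (assumes only the
# scalings implied by those unit strings):
#
#   heading_deg = var_list[4].data / 10.0    # 'deci-degrees' -> degrees
#   temp_degC   = var_list[7].data / 100.0   # '0.01degC'     -> degC
#   press_dbar  = var_list[8].data / 1000.0  # '0.001dbar'    -> dbar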
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
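#VEL3D -- recovered_wfp profiler (vel3d_k) and recovered_inst seafloor (vel3d_c) streams
# The 'ddegrees' unit strings below appear to mean deci-degrees, i.e. the
# same scaling written as 'deci-degrees' elsewhere in this file.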
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
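#PRESF -- seafloor tide/pressure recorders, recovered_inst: tide pressure in dbar plus temperature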
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
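#PHSEN -- seawater pH sensors, recovered_inst: thermistor temperature and pH (unitless)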
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
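#PCO2W -- in-water pCO2 sensors, recovered_inst: thermistor temperature and pCO2 in uatm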
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
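# PARAD on the CE09OSPM wire-following profiler (recovered WFP data)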
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
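# NUTNR (SUNA nitrate) recovered-instrument datasets for the mooring NSIF nodes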
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
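# FDCHP (direct-covariance flux package) recovered-instrument dataset; only the time coordinate is populated here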
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
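# FLORT (fluorometer/backscatter) recovered-instrument datasets for the surface buoys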
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
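# On the wire-following profiler the DOSTA class is served by the DOFST-K oxygen sensor, as the stream path below shows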
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
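# MFN wave statistics (WAVSS_Stats) come from the ADCPT log9 recovered stream, per the dataset paths below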
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
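# Cabled benthic experiment package (BEP) nodes: streamed datasets for CE02SHBP and CE04OSBP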
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
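# OPTAA streamed datasets: only the time coordinate is mapped here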
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
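# CSPP DOSTA: telemetered and recovered variants for the inshore profilers (CE01ISSP, CE06ISSP)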
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
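# CSPP CTD (CTDPF-J): telemetered and recovered datasets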
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
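# CSPP NUTNR: only RecoveredCSPP streams are mapped in this section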
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
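# Shelf surface-piercing profilers (CE02SHSP, CE07SHSP): RecoveredCSPP datasets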
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
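# Coastal glider (CE05MOAS) datasets below; lat/lon are carried alongside the science variables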
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
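# CE05MOAS glider dissolved-oxygen (DOSTA) streams for the same set of
# gliders: oxygen concentration (umol/L), absolute oxygen (umol/kg),
# interpolated CTD pressure, and position.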
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
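# CE05MOAS glider fluorometer (FLORT) streams for the same set of gliders:
# scattering coefficient, chlorophyll, CDOM, and optical backscatter from
# the sci_flbbcd channels, plus interpolated CTD pressure and position.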
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
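# Glider PARAD branches: the PARAD-M sensor reports photosynthetically
# available radiation (parad_m_par, umol photons m-2 s-1) alongside the
# interpolated CTD pressure and GPS position. Telemetered data come from
# the *_glider_instrument stream; recovered-host data from
# *_glider_recovered.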
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
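# Glider ADCP branches (recovered-host only): each registers the velocity
# profile (eastward/northward/upward seawater velocity per depth bin) plus
# the glider attitude (heading/pitch/roll, reported in deci-degrees), the
# bin depths, interpolated CTD pressure, and GPS position.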
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
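# Surface-mooring METBK1-hr branches: the metbk_hourly stream carries the
# hourly bulk air-sea flux products computed from the METBK sensor suite.
# The met_* names follow the OOI data-product conventions, as their units
# suggest (e.g. latnflx = latent heat flux, sensflx = sensible heat flux,
# mommflx = momentum flux / wind stress, netlirr = net longwave irradiance,
# sphum2m = specific humidity at 2 m, tempskn = skin sea surface
# temperature); met_timeflx is the flux timestamp and stands in for the
# usual 'time' variable.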
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
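# Surface-mooring WAVSS mean-directional branches: the
# wavss_a_dcl_mean_directional stream(s) carry the mean directional wave
# spectra: per-band power spectral density (m2 Hz-1), mean direction, and
# directional spread, together with the band layout (number_bands,
# initial_frequency, frequency_spacing) and the
# magnetic-declination-corrected directions.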
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
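# WAVSS non-directional wave spectra streams: same platforms as above, with a
# reduced variable set (no direction arrays).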
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
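# WAVSS buoy-motion streams: heave/north/east displacement offsets plus
# magnetically corrected buoy motion time series.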
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
"""Generates finely spaced grid of SNII, AGB, and SNIa yields.
Generates a finely spaced grid of SN II isotopic yields from Woosley & Weaver
(1995), AGB isotopic yields from Renzini & Voli (1981), and SNIa yields from
Thielemann, Nomoto, & Yokoi (1986).
Woosley & Weaver (1995): M = 11--40 Msun; Z = 0--solar
Renzini & Voli (1981): M = 1--8 Msun; Z = 0--solar
Thielemann et al. (1986): W7 model from Nomoto et al. (1984)
Timmes already converted Ni56 to Fe56 in the maltov1.orig file (WW95 doesn't
account for its decay).
"""
from __future__ import print_function, division, absolute_import
import os
from os.path import join
import sys
import copy
import numpy as np
from scipy import interpolate
import pandas as pd
# ---- Set Paths -----
path_calc_yields = join(os.path.abspath(os.path.dirname(__file__)), '')
path_flexce = join('/'.join(path_calc_yields.split('/')[:-2]), '')
path_fileio = join(path_flexce, 'fileio')
path_data = join(path_flexce, 'data')
path_yields = join(path_data, 'yields')
path_yldgen = join(path_yields, 'general')
path_ww95 = join(path_yields, 'ww95')
path_ww95_orig = join(path_ww95, 'orig')
path_ww95_half_fe = join(path_ww95, 'half_fe')
# path_ww95_half_fe_only = join(path_ww95, 'half_fe_only')
# path_rv81 = join(path_yields, 'renzini81')
# path_tny86 = join(path_yields, 'thielemann86')
sys.path.append(path_fileio)
# -------------------
from pickle_io import pickle_read
from pickle_io import pickle_write
if not os.path.isdir(path_ww95_orig):
os.mkdir(path_ww95_orig)
if not os.path.isdir(path_ww95_half_fe):
os.mkdir(path_ww95_half_fe)
# ---- WW95 Yields -----
z = open(join(path_ww95, 'maltov1.orig'), 'r')
sym = [] # symbol names
sym_metallicity = [] # [symbol--metallicity pairs]
bbmf = [] # big bang mass fraction
sneIa_orig = []
# ww95_orig[80 symbols + 5 metallicities/sym = 400][[25 masses], [25 yields]]
ww95_orig = []
tmp_Ia = 0
tmp = 0
for row in z:
if 'symbol name' in row:
sym_tmp = row.split()[0]
sym.append(sym_tmp)
if 'big bang mass fraction' in row:
bbmf.append(float(row.split()[0]))
if 'w7 tny86' in row:
yields_Ia = []
tmp_Ia = 6
if tmp_Ia > 0:
yields_Ia.append(float(row.split()[0]))
tmp_Ia -= 1
if tmp_Ia == 0:
sneIa_orig.append(np.array(yields_Ia))
if '* metallicity' in row:
metal_tmp = float(row.split()[0])
sym_metallicity.append([sym_tmp, metal_tmp])
if 'rv81 stellar mass & yield' in row:
mass = []
yields = []
tmp = 25
if tmp > 0:
mass.append(float(row.split()[0]))
yields.append(float(row.split()[1]))
tmp -= 1
if tmp == 0:
ww95_orig.append([np.array(mass), np.array(yields)])
z.close()
sym = np.array(sym)
sym_mass = np.array([int(sym[i][-1]) if i < 7 else int(sym[i][-2:])
for i in range(len(sym) - 1)])
sym_metallicity = np.array(sym_metallicity)
bbmf = np.array(bbmf)[:-1]
sneIa_orig = np.array(sneIa_orig)
tnyIa = sneIa_orig[:, 0]
ww95_orig = np.array(ww95_orig)
# all symbols have 25 masses and yields and 5 metallicity values:
ww95_mass = ww95_orig[0][0]
ww95_mass2 = np.concatenate([ww95_mass for i in range(5)])
ww95_metal = np.array([0.00e+00, 1.90e-06, 1.90e-04, 1.90e-03, 1.90e-02])
ww95_metal2 = np.concatenate([np.ones(25) * ww95_metal[i] for i in range(5)])
n_sym = len(sym)
n_iso = len(sym) - 1
n_metal = len(sym) - 5
n_yield = len(ww95_orig)
# ----------------------
# ---- CL04 Data ----
species_in = pd.read_csv(join(path_yldgen, 'species.txt'),
delim_whitespace=True, skiprows=1, usecols=[1],
names=['name'])
species = np.array(species_in['name'])
n_species = len(species)
# match isotopes from WW95 yields to CL04 yields
sym2 = np.array([sym[i].title() for i in range(len(sym))])
ind_sp = []
for i in range(n_sym):
if sym2[i] in species:
tmp = np.where(sym2[i] == species)[0][0]
ind_sp.append(tmp)
else:
pass
# print 'sym[%i]' % (i), '(%s)' % (sym[i]), 'not in species array'
ind_sp = np.array(ind_sp)
# solar abundance of metals---needed to subtract the initial metal abundances
# of the stellar models (also assume Y = 0.285)---in relative amounts (not
# Msun), that is, sum(solar_ab) = 1.
solar_isotopes = pd.read_csv(join(path_yldgen, 'Solar_isotopes.txt'),
delim_whitespace=True, skiprows=1,
usecols=[0, 1], names=['name', 'ab'])
solar_iso = np.array(solar_isotopes['name'])
solar_ab = np.array(solar_isotopes['ab'])
# indices within "species" array of the elements for which CL04 give a solar
# abundance (Note: WW95 also used the Anders & Grevesse 1989 solar abundance)
ind_iso = []
for i in range(len(solar_iso)):
ind_iso.append(np.where(solar_iso[i] == species)[0][0])
ind_iso = np.array(ind_iso)
# -------------------
# --- Calculate Net Yields ---
# WW95 absolute yields (125 mass/metallicity pairs, 293 isotopes)
ww95_orig2 = ww95_orig.reshape(80, 5, 2, 25)
ww95_orig3 = ww95_orig2[:, :, 1]
ww95_orig4 = ww95_orig3.reshape(80, 125).T
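# The reshapes above take ww95_orig from (isotope * metallicity, {mass, yield},
# mass bin) to a (mass-metallicity pair, isotope) yield table: the yield rows
# are kept and 25 masses x 5 metallicities = 125 rows span 80 isotope columns.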
ww95_abs = np.zeros((125, n_species))
for i in range(125):
for j in range(79):
ww95_abs[i, ind_sp[j]] = ww95_orig4[i, j]
# WW95 mass ejected
ww95_mej = np.sum(ww95_abs, axis=1)
# WW95 remnant mass
ww95_rem = ww95_mass2 - ww95_mej
# WW95 report remnant masses, but sum(abs yields) + remnant mass != mass of
# star, so for accounting purposes it is best to calculate remnant mass =
# mass of star - sum(abs yields).
# WW95 reported remnant masses:
# ww95_rem = ww95_orig4[:, -1]
# WW95 initial composition
ww95_init_comp = np.zeros(ww95_abs.shape)
for i in range(5):
indt = np.arange(25*i, 25*i+25)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 21:21:57 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
from ..utilities.linalg_operations import (_check_np, _check_shape)
from .links import (Link, IdentityLink, ReciprocalLink, LogLink, LogitLink,
PowerLink)
LN2PI = np.log(2.0 * np.pi)
FOUR_SQRT2 = 4.0 * np.sqrt(2.0)
def _logbinom(n, k):
y=sp.special.gammaln(n+1)-sp.special.gammaln(k+1)-sp.special.gammaln(n-k+1)
return y
class ExponentialFamily(object):
def __init__(self, link=IdentityLink, weights=1.0, scale=1.0):
if not isinstance(link, Link):
link = link()
self._link = link
self.weights = weights
self.scale = scale
def _to_mean(self, eta=None, T=None):
if eta is not None:
mu = self.inv_link(eta)
else:
mu = self.mean_func(T)
return mu
def link(self, mu):
return self._link.link(mu)
def inv_link(self, eta):
return self._link.inv_link(eta)
def dinv_link(self, eta):
return self._link.dinv_link(eta)
def d2inv_link(self, eta):
return self._link.d2inv_link(eta)
def dlink(self, mu):
return 1.0 / self.dinv_link(self.link(mu))
def d2link(self, mu):
eta = self.link(mu)
res = -self.d2inv_link(eta) / np.power(self.dinv_link(eta), 3)
return res
def cshape(self, y, mu):
y = _check_shape(_check_np(y), 1)
mu = _check_shape(_check_np(mu), 1)
return y, mu
def loglike(self, y, eta=None, mu=None, T=None, scale=1.0):
return np.sum(self._loglike(y, eta, mu, T, scale))
def full_loglike(self, y, eta=None, mu=None, T=None, scale=1.0):
return np.sum(self._full_loglike(y, eta, mu, T, scale))
def pearson_resid(self, y, eta=None, mu=None, T=None, scale=1.0):
if mu is None:
mu = self._to_mean(eta=eta, T=T)
y, mu = self.cshape(y, mu)
V = self.var_func(mu)
r_p = (y - mu) / np.sqrt(V)
return r_p
def signed_resid(self, y, eta=None, mu=None, T=None, scale=1.0):
if mu is None:
mu = self._to_mean(eta=eta, T=T)
y, mu = self.cshape(y, mu)
d = self.deviance(y, mu=mu)
r_s = np.sign(y - mu) * np.sqrt(d)
return r_s
def gw(self, y, mu, phi=1.0):
y, mu = self.cshape(y, mu)
num = self.weights * (y - mu)
den = self.var_func(mu=mu) * self.dlink(mu) * phi
res = num / den
return -res
def hw(self, y, mu, phi=1.0):
y, mu = self.cshape(y, mu)
eta = self.link(mu)
Vinv = 1.0 / (self.var_func(mu=mu))
W0 = self.dinv_link(eta)**2
W1 = self.d2inv_link(eta)
W2 = self.d2canonical(mu)
Psc = (y-mu) * (W2*W0+W1*Vinv)
Psb = Vinv*W0
res = (Psc - Psb)*self.weights
return -res/phi
class Gaussian(ExponentialFamily):
def __init__(self, link=IdentityLink, weights=1.0, scale=1.0):
super().__init__(link, weights, scale)
def _loglike(self, y, eta=None, mu=None, T=None, scale=1.0):
if mu is None:
mu = self._to_mean(eta=eta, T=T)
y, mu = self.cshape(y, mu)
w = self.weights / scale
ll= w * np.power((y - mu), 2) + np.log(scale/self.weights)
return ll
def _full_loglike(self, y, eta=None, mu=None, T=None, scale=1.0):
ll = self._loglike(y, eta, mu, T, scale)
llf = ll + LN2PI
return llf
def canonical_parameter(self, mu):
T = mu
return T
def cumulant(self, T):
b = T**2 / 2.0
return b
def mean_func(self, T):
mu = T
return mu
def var_func(self, T=None, mu=None, eta=None, scale=1.0):
if mu is None:
mu = self._to_mean(eta=eta, T=T)
V = mu*0.0+1.0
return V
def d2canonical(self, mu):
res = 0.0*mu+1.0
return res
def deviance(self, y, T=None, mu=None, eta=None, scale=1.0):
if mu is None:
mu = self._to_mean(eta=eta, T=T)
y, mu = self.cshape(y, mu)
w = self.weights
d = w * np.power((y - mu), 2.0)
return d
def dtau(self, tau, y, mu):
y, mu = self.cshape(y, mu)
w = self.weights
phi = np.exp(tau)
g = -np.sum(w * np.power((y - mu), 2) / phi - 1)
return g
def d2tau(self, tau, y, mu):
y, mu = self.cshape(y, mu)
w = self.weights
phi = np.exp(tau)
g = np.sum(w * np.power((y - mu), 2) / (2 * phi))
return g
class InverseGaussian(ExponentialFamily):
def __init__(self, link=PowerLink(-2), weights=1.0, scale=1.0):
super().__init__(link, weights, scale)
def _loglike(self, y, eta=None, mu=None, T=None, scale=1.0):
if mu is None:
mu = self._to_mean(eta=eta, T=T)
y, mu = self.cshape(y, mu)
w = self.weights / scale
ll = w * np.power((y - mu), 2) / (y * mu**2)
ll+= np.log((scale * y**2) / self.weights)
return ll
def _full_loglike(self, y, eta=None, mu=None, T=None, scale=1.0):
ll = self._loglike(y, eta, mu, T, scale)
llf = ll + LN2PI
return llf
def canonical_parameter(self, mu):
T = 1.0 / (np.power(mu, 2.0))
return T
import matplotlib.pyplot as plt
import numpy as np
import csv
import html
import re
import random
import json
import nltk
from nltk.tokenize import regexp_tokenize
from nltk.corpus import stopwords
from nltk import FreqDist
from string import ascii_lowercase
from sklearn.metrics import confusion_matrix, accuracy_score
nltk.download('punkt')
nltk.download('stopwords')
filename = 'data/twitter.csv'
filename_cache = 'data/twitter.json'
nb_samples = 100000
nb_words = 10000
nb_classes = 2
nb_alpha = 1
ds_from_cache = True
class Dataset:
def __init__(self):
if ds_from_cache:
print('Using cached dataset...')
self.deserialize()
else:
print('Using new dataset...')
self.load(filename)
self.clean()
self.calculate_bow_lr()
self.split()
self.calculate_occurrences()
self.calculate_likelihoods()
self.serialize()
def compress_np1d(self, arr):
return {i: str(arr[i]) for i in range(len(arr)) if arr[i] != 0}
def decompress_np1d(self, map):
arr = np.zeros(nb_words, dtype=np.float32)
for (i, x) in map.items():
arr[int(i)] = float(x)
return arr
def serialize(self):
print('Serializing dataset...')
with open(filename_cache, 'w') as f:
compress_train_x = [self.compress_np1d(x) for x in self.train_x]
compress_test_x = [self.compress_np1d(x) for x in self.test_x]
ds_json = {
'train_x': compress_train_x,
'train_y': self.train_y.tolist(),
'test_x': compress_test_x,
'test_y': self.test_y.tolist(),
'like': self.like.tolist(),
'top_neg': self.top_neg,
'top_pos': self.top_pos,
'lr_min': self.lr_min,
'lr_max': self.lr_max
}
json.dump(ds_json, f)
def deserialize(self):
with open(filename_cache, 'r') as f:
ds_json = json.load(f)
self.train_x = [self.decompress_np1d(x) for x in ds_json['train_x']]
self.train_y = ds_json['train_y']
self.test_x = [self.decompress_np1d(x) for x in ds_json['test_x']]
self.test_y = ds_json['test_y']
self.like = ds_json['like']
self.top_neg = ds_json['top_neg']
self.top_pos = ds_json['top_pos']
self.lr_min = ds_json['lr_min']
self.lr_max = ds_json['lr_max']
# Load the data
def load(self, filename):
print('Loading data...')
self.data_x = []
self.data_y = []
with open(filename, 'r', encoding='latin1') as fin:
reader = csv.reader(fin, delimiter=',')
next(reader, None)
for row in reader:
self.data_y.append(int(row[1]))
self.data_x.append(row[2])
# Clean the data
def clean(self):
print('Cleaning data...')
self.data_x = [html.unescape(x) for x in self.data_x]
self.data_x = [re.sub(r'https?://\S+', '', x) for x in self.data_x]
self.data_x = [re.sub(r'[^\w\s]|\d+', '', x) for x in self.data_x]
self.data_x = [re.sub(r'\s\s+', ' ', x) for x in self.data_x]
self.data_x = [x.strip().lower() for x in self.data_x]
for c in ascii_lowercase:
self.data_x = [re.sub(c + '{3,}', c+c, x) for x in self.data_x]
self.data_x = [regexp_tokenize(x, r'\w+') for x in self.data_x]
stops = set(stopwords.words('english'))
self.data_x = [[w for w in x if not w in stops] for x in self.data_x]
self.data_x = self.data_x[:nb_samples]
self.data_y = self.data_y[:nb_samples]
# Compute the BOW representation and the LR metric
def calculate_bow_lr(self):
print('Calculating BOW representation and LR metric...')
freq = FreqDist([w for x in self.data_x for w in x])
self.vocab, _ = zip(*freq.most_common(nb_words))
self.vec_x = np.zeros((len(self.data_x), nb_words), dtype=np.float32)
lr = np.zeros(nb_words, dtype=np.float32)
for j, w in enumerate(self.vocab):
neg = 0
pos = 0
for i, x in enumerate(self.data_x):
cnt = x.count(w)
self.vec_x[i][j] = cnt
if self.data_y[i] == 0:
neg += cnt
else:
pos += cnt
if pos >= 10 and neg >= 10:
lr[j] = pos / neg
if j % 100 == 0:
print('[calculate_bow_lr] Word: {}/{}'.format(j, nb_words))
# Find the five most frequently used words in negative tweets
freq_neg = FreqDist([w for i, x in enumerate(self.data_x) for w in x if self.data_y[i] == 0])
self.top_neg, _ = zip(*freq_neg.most_common(5))
# Find the five most frequently used words in positive tweets
freq_pos = FreqDist([w for i, x in enumerate(self.data_x) for w in x if self.data_y[i] == 1])
self.top_pos, _ = zip(*freq_pos.most_common(5))
# Find the five words with the lowest LR metric values
self.lr_min = []
min_cnt = 1
for i in lr.argsort():
if min_cnt > 5:
break
if lr[i] > 0:
self.lr_min.append(self.vocab[i])
min_cnt += 1
# Find the five words with the highest LR metric values
self.lr_max = []
max_cnt = 1
for i in (-lr).argsort():
if max_cnt > 5:
break
if lr[i] > 0:
self.lr_max.append(self.vocab[i])
max_cnt += 1
# Split the data into training and test sets
def split(self):
print('Splitting data...')
self.train_x, self.test_x = np.split(self.vec_x, [int(len(self.vec_x)*0.8)])
self.train_y, self.test_y = np.split(self.data_y, [int(len(self.data_y)*0.8)])
self.nb_train = len(self.train_x)
self.nb_test = len(self.test_x)
# Count the occurrences of every word in every class
def calculate_occurrences(self):
print('Calculating every word occurrence for every class...')
self.occs = np.zeros((nb_classes, nb_words), dtype=np.float32)
for i, y in enumerate(self.train_y):
for w in range(nb_words):
self.occs[y][w] += self.train_x[i][w]
if i % 1000 == 0:
print('[calculate_occurrences] Object: {}/{}'.format(i, self.nb_train))
# Compute P(word|class)
def calculate_likelihoods(self):
print('Calculating P(word|class)...')
self.like = np.zeros((nb_classes, nb_words), dtype=np.float32)
for c in range(nb_classes):
for w in range(nb_words):
up = self.occs[c][w] + nb_alpha
down = np.sum(self.occs[c]) + nb_words * nb_alpha  # Laplace smoothing (assumed completion, matching nb_alpha above)
self.like[c][w] = up / down
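# A sketch of how these likelihoods would be used for prediction (not part of
# the original script): score each class by its log-likelihood for a BOW count
# vector x and take the argmax; with roughly balanced classes the prior term
# can be dropped.
#   log_post = [np.sum(x * np.log(self.like[c])) for c in range(nb_classes)]
#   y_hat = int(np.argmax(log_post))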
from numpy import linalg as LA
import numpy as np
MAX_SIG_VALUE = 10000000
def get_avg_gradient(gradient_list, num_of_workers):
summed_gradient = list(gradient_list[0])  # shallow copy so the caller's first gradient list is not mutated
i = 0
for gradient in gradient_list:
i += 1
if i == 1:
continue
j = 0
for gradient_part in gradient:
summed_gradient[j] = np.add(summed_gradient[j], gradient_part)
j += 1
avg_gradient = []
for gradient_part in summed_gradient:
avg_gradient.append(gradient_part / num_of_workers)
return avg_gradient
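# Example (sketch): each worker gradient is a list of per-layer arrays.
#   g1 = [np.zeros((2, 2)), np.ones(3)]
#   g2 = [2 * np.ones((2, 2)), 3 * np.ones(3)]
#   get_avg_gradient([g1, g2], 2)  # -> [np.ones((2, 2)), 2 * np.ones(3)]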
def get_gradient_diff(previous_gradient, new_gradient):
gradient_diff = []
for i in range(len(previous_gradient)):
gradient_diff.append(np.subtract(new_gradient[i], previous_gradient[i]))
return gradient_diff
import numpy as np
def get_bands(nscf_out, tgrid0):
"""Get bands and kgrid info from nscf output
data contains: kvecs, bands, tgrid, raxes, gvecs
kvecs (nk, ndim) are reciprocal points possible in the irreducible wedge
kvecs are in 2\pi/alat units
bands (nk, nstate) are the Kohn-Sham eigenvalues
bands are in eV units
tgrid (ndim) is grid size in each dimension
!!!! currently assumed to be the same as x
raxes (ndim, ndim) is the reciprocal lattice
gvecs (nk, ndim) are reciprocal lattice points (kvecs) converted to integers
Args:
nscf_out (str): output file
tgrid0 (int): grid along x
Return:
dict: data
"""
from qharv.inspect import axes_pos
import qe_reader as qer
# get bands
data = qer.parse_nscf_bands(nscf_out)
kvecs = data['kvecs']
# get raxes, gvecs
tgrid = np.array([tgrid0]*3)
axes = qer.read_out_cell(nscf_out)
raxes = axes_pos.raxes(axes)
gcand = np.dot(kvecs, np.linalg.inv(raxes/tgrid))
gvecs = np.around(gcand).astype(int)
data['tgrid'] = tgrid
data['raxes'] = raxes
data['gvecs'] = gvecs
data.pop('nkpt')
return data
def get_ekmap(scf_out):
"""Obtain the internal variable 'equiv' from kpoint_grid.f90 in QE/PW
store the maps between full BZ (fBZ) and irreducible BZ (iBZ)
Args:
scf_out (str): output file
Return:
(dict, dict): (fBZ->iBZ, iBZ->fBZ) maps
"""
from qharv.reel import ascii_out
mm = ascii_out.read(scf_out)
text = ascii_out.block_text(mm, 'equivalent kpoints begin', 'end')
lines = text.split('\n')
emap = {} # full kgrid to irreducible wedge
kmap = {} # irreducible wedge to full kgrid
for line in lines:
tokens = line.split('equiv')
if len(tokens) != 2: continue
left, right = map(int, tokens)
emap[left] = right
if right in kmap:
kmap[right].append(left)
else:
kmap[right] = [left]
mm.close()
return emap, kmap
def get_weights(equiv_out):
"""Get weights of irreducible kpoints.
Args:
equiv_out (str): QE output file
Return:
np.array: weights, number of equivalent kpoints for each irrek
"""
emap, kmap = get_ekmap(equiv_out)
sidxl = sorted(kmap.keys())
weights = []
for sidx in sidxl:
kwt = len(kmap[sidx])
weights.append(kwt)
return np.array(weights)
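# Example (sketch, hypothetical file name): fractional Brillouin-zone weight
# of each irreducible k-point.
#   weights = get_weights('scf.out')
#   frac = weights / weights.sum()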
def unfold2(bands, emap, kmap, axis=0):
"""unfold method 2: steal equivalence map from QE kpoint_grid.f90
kpoints in bands MUST be ordered in the same way as the QE irreducible kpts
Args:
bands (np.array): band energy with kpoint (and state) labels
emap (dict): int -> int equivalence map of kpoint indices (full -> irrek)
kmap (dict): inverse of emap
axis (int, optional): kpoint axis, default is 0
Return:
np.array: unfolded bands
"""
idxl = sorted(kmap.keys())
nktot = len(emap)
# extend the kpoint axis
new_shape = list(bands.shape)
new_shape[axis] = nktot
vals = np.zeros(new_shape)
# fill existing values
for i, idx in enumerate(idxl):
if axis == 0:
vals[idx-1] = bands[i]
elif axis == 1:
vals[:, idx-1] = bands[:, i]
else:
raise RuntimeError('need to implement axis %d (add another :,)' % axis)
# map symmetry points
for idx0, idx1 in emap.items():
if axis == 0:
vals[idx0-1] = vals[idx1-1]
elif axis == 1:
vals[:, idx0-1] = vals[:, idx1-1]
return vals
def get_mats_vecs(symops):
mats = []
vecs = []
for so in symops:
mat = np.array(so['mat'], int)
vec = np.array(so['vec'], int)
mats.append(mat)
vecs.append(vec)
return np.array(mats), np.array(vecs)
def unfold1(gvecs1, nkm1, nscf_out, pbc, show_progress=True):
"""unfold method 1: apply symmetry operations to unique gvecs
notice, there is no reason to carry nkm1 around
todo: unfold kgrid only, one symmetry operation at a time
return a list of 1D indices on the regular grid
Args:
gvecs1 (np.array): integer vectors in the irreducible BZ
nkm1 (np.array): scalar field defined over gvecs1
scf_out (str): nscf output containing symmetry matrices
pbc (bool): apply periodic boundary condition
show_progress (bool, optional): show progress bar, default True
"""
# get symops
import qe_reader as qer
symops = qer.read_sym_ops(nscf_out)
# make a grid large enough to contain the unfolded n(k)
import chiesa_correction as chc
gmin, gmax, ng = chc.get_regular_grid_dimensions(gvecs1)
rgvecs = chc.get_regular_grid(gmin, gmax, ng, int)
# unfold
rnkm = np.zeros(len(rgvecs))
filled = np.zeros(len(rgvecs), dtype=bool)
if show_progress:
from qharv.field import sugar
bar = sugar.get_progress_bar(len(symops))
for isym, so in enumerate(symops):
mat = np.array(so['mat'], dtype=int)
for ig, gv in enumerate(gvecs1): # unfold existing data
gv1 = np.dot(mat, gv)
if pbc:
# bring back gvectors outside of rgvecs
gv1 = (gv1-gmin) % ng + gmin
else:
# ignore gvectors outside of rgvecs
if (gv1 < gmin).any() or (gv1 > gmax).any(): continue
idx3d = gv1-gmin
# save new point
idx = np.ravel_multi_index(idx3d, ng)
if not filled[idx]:
rnkm[idx] = nkm1[ig]
filled[idx] = True
if show_progress:
bar.update(isym)
return rgvecs[filled], rnkm[filled]
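# Example (sketch, hypothetical inputs): unfold an irreducible-wedge scalar
# field n(k) onto the full grid with periodic boundary conditions.
#   gvecs_full, nkm_full = unfold1(gvecs1, nkm1, 'nscf.out', pbc=True)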
def unfold_idx(gvecs1, mats, pbc):
# make a grid large enough to contain the unfolded n(k)
import chiesa_correction as chc
gmin, gmax, ng = chc.get_regular_grid_dimensions(gvecs1)
rgvecs = chc.get_regular_grid(gmin, gmax, ng, int)
# unfold
npt = np.prod(ng)
filled = np.zeros(npt, dtype=bool)
ridx = np.ones(npt, dtype=int)
#!/usr/bin/env python3
import libspn as spn
import tensorflow as tf
import numpy as np
import collections
from random import shuffle
def _broadcast_to_2D(test_inputs, subset_indices=None, n_stack=2):
# Subset indices is either specified or set to [0, ..., len(inputs)-1]
subset_indices = subset_indices or list(range(len(test_inputs[0])))
ret = []
for test_input in test_inputs:
# Append a tuple with n_stack repetitions if the index of the original element index is in
# subset_indices
ret.append(tuple(np.asarray(n_stack*[elem]) if ind in subset_indices else elem
for ind, elem in enumerate(test_input)))
ret.append(test_input)
return ret
class TestMath(tf.test.TestCase):
def test_logmatmul(self):
a = tf.random_uniform(shape=(8, 150))
b = tf.random_uniform(shape=(150, 9))
ab_linear = tf.matmul(a, b)
ab_log = tf.exp(spn.utils.logmatmul(tf.log(a), tf.log(b)))
with self.test_session() as sess:
ab_linear_out, ab_log_out = sess.run([ab_linear, ab_log])
self.assertAllClose(ab_linear_out, ab_log_out)
def test_gather_columns_3d_not_padded(self):
def assert_output(params, indices, params_dtype, output, output_shape):
# Assert Output values, shape and dtype
true_output = (params[indices] if len(params.shape) == 1
else params[:, indices])
np.testing.assert_array_almost_equal(output,
np.array(true_output))
self.assertEqual(params_dtype.as_numpy_dtype, output.dtype)
np.testing.assert_array_equal(output_shape,
list(np.array(true_output).shape))
def test(params_shape, indices_shape, param_dtype, ind_dtype, use_gpu=False):
if use_gpu:
device = [False, True]
else:
device = [False]
if len(params_shape) == 1:
params_cols = params_shape[0]
else:
params_cols = params_shape[1]
for p_dt in param_dtype:
for i_dt in ind_dtype:
for dev in device:
with self.test_session(use_gpu=dev) as sess:
# Generate random params array
params = np.random.randint(100, size=params_shape)
# Convert params to appropriate data-types
params = np.array(params, dtype=p_dt.as_numpy_dtype)
# Create params tensor
params_tensor = tf.constant(params, dtype=p_dt)
# Random indices
random_indices = np.random.randint(params_cols,
size=indices_shape,
dtype=i_dt)
# Arange indices
if len(indices_shape) == 1:
arange_indices = np.arange(0, params_cols, dtype=i_dt)
else:
arange_indices = np.array([np.arange(0, params_cols) for
_ in range(indices_shape[0])],
dtype=i_dt)
# Create Ops
op_rand_ind = spn.utils.gather_cols_3d(params_tensor,
random_indices)
op_arange_ind = spn.utils.gather_cols_3d(params_tensor,
arange_indices)
# Execute Sessions
output_rand_ind = sess.run(op_rand_ind)
output_arange_ind = sess.run(op_arange_ind)
# Test Output
assert_output(params, random_indices, p_dt, output_rand_ind,
op_rand_ind.get_shape())
assert_output(params, arange_indices, p_dt, output_arange_ind,
op_arange_ind.get_shape())
# List of params shapes
params_shapes = [(1, ), # Single params
(1, 1), # 2D params with single row and column
(6, ), # 1D params
(3, 1), # 2D params with single column
(1, 6), # 2D params with single row
(3, 6)] # 2D params with multiple rows and columns
# List of indices shapes
indices_shapes = [(1, ), # Single index
(1, 1), # 2D indices with single row and column
(4, ), # 1D indices
(4, 1), # 2D indices with single column
(1, 5), # 2D indices with single row
(4, 5)] # 2D indices with multiple rows and columns
# All combination of test cases for gather_cols_3d without padding
for p_shape in params_shapes:
for i_shape in indices_shapes:
test(params_shape=p_shape, indices_shape=i_shape,
param_dtype=[tf.float32, tf.float64, tf.int32, tf.int64],
ind_dtype=[np.int32, np.int64],
use_gpu=True)
def test_gather_columns_3d_padded(self):
def test(params_shape, indices_shape, param_dtype, ind_dtype,
pad_elem=0, use_gpu=False):
if use_gpu:
device = [False, True]
else:
device = [False]
if len(params_shape) == 1:
params_rows = 1
params_cols = params_shape[0]
else:
params_rows = params_shape[0]
params_cols = params_shape[1]
if len(indices_shape) == 1:
indices_rows = 1
indices_cols = indices_shape[0]
else:
indices_rows = indices_shape[0]
indices_cols = indices_shape[1]
for p_dt in param_dtype:
for i_dt in ind_dtype:
for dev in device:
with self.test_session(use_gpu=dev) as sess:
# Generate random params array
params = np.random.randint(100, size=params_shape)
# Convert params to appropriate data-types
params = np.array(params, dtype=p_dt.as_numpy_dtype)
# Create params tensor
params_tensor = tf.constant(params, dtype=p_dt)
# Generate a list of 1D indices arrays, with random
# length ranging between [1, indices-column-size)
indices = []
ind_length = indices_cols
for i in range(indices_rows):
indices.append(np.random.randint(params_cols,
size=ind_length,
dtype=i_dt))
ind_length = np.random.randint(1, indices_cols)
# Shuffle indices list
shuffle(indices)
# Create Ops
op = spn.utils.gather_cols_3d(params_tensor, indices,
pad_elem=pad_elem)
# Execute session
output = sess.run(op)
# Insert a column of zeros to the last column of params
params_with_zero = \
np.insert(params, params_cols,
np.ones(params_rows,
dtype=p_dt.as_numpy_dtype)*pad_elem,
axis=-1)
# Fill indices of padded columns with index of the
# last-column of params
indices = [np.insert(ind, ind.size,
np.full((indices_cols-ind.size),
params_cols, dtype=i_dt))
for ind in indices]
# Convert list of indices to a np.array
indices = np.array(indices)
# Compute true output
true_output = (params_with_zero[indices] if
len(params_with_zero.shape) == 1
else params_with_zero[:, indices])
# Test Output values, shape and dtype
np.testing.assert_array_almost_equal(output,
np.array(true_output))
self.assertEqual(p_dt.as_numpy_dtype, output.dtype)
np.testing.assert_array_equal(op.get_shape(),
list(np.array(true_output).shape))
# List of params shapes
params_shapes = [(6, ), # 1D params
(1, 6), # 2D params with single row
(3, 6)] # 2D params with multiple rows and columns
# List of padding elements
pad_elems = [-float('inf'), -1.0, 0.0, 1.0, 1.23456789, float('inf'), # float
-1, 0, 1, 12345678] # int
# All combination of test cases for gather_cols_3d without padding
for p_shape in params_shapes:
for p_elem in pad_elems:
test(params_shape=p_shape, indices_shape=(4, 5),
param_dtype=[tf.float32, tf.float64, tf.int32, tf.int64],
ind_dtype=[np.int32, np.int64], pad_elem=p_elem,
use_gpu=True)
def test_scatter_cols_errors(self):
# Should work
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1, 2], 3)
spn.utils.scatter_cols(tf.constant([[10, 11, 12]]),
[0, 1, 2], 3)
spn.utils.scatter_cols(tf.placeholder(tf.float32,
shape=(None, 3)),
[0, 1, 2], 3)
# Param size defined
with self.assertRaises(RuntimeError):
spn.utils.scatter_cols(tf.placeholder(tf.float32,
shape=(None, None)),
[0, 1, 2], 3)
# Param dim number
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant(10),
[0, 1, 2], 3)
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([[[10, 11, 12]]]),
[0, 1, 2], 3)
# num_out_cols type
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1, 2], 3.1)
# num_out_cols value
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1, 2], 2)
# Indices dims
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[[0, 1, 2]], 3)
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
1, 3)
# Indices size
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1, 2, 3], 4)
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1], 4)
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[], 4)
# Indices values
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0.1, 1, 2], 3)
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1, 3], 3)
with self.assertRaises(ValueError):
spn.utils.scatter_cols(tf.constant([10, 11, 12]),
[0, 1, 1], 3)
def test_scatter_cols(self):
def test(params, indices, num_out_cols, true_output,
params_dtype, indices_dtype, on_gpu):
with self.subTest(params=params, indices=indices,
num_out_cols=num_out_cols,
params_dtype=params_dtype,
indices_dtype=indices_dtype,
on_gpu=on_gpu):
tf.reset_default_graph()
with self.test_session(force_gpu=on_gpu) as sess:
# Indices
indices = np.asarray(indices, dtype=indices_dtype)
# Params
p1d = tf.constant(params, dtype=params_dtype)
p2d1 = tf.constant(np.array([np.array(params)]),
dtype=params_dtype)
p2d2 = tf.constant(np.array([np.array(params),
np.array(params) * 2,
np.array(params) * 3]),
dtype=params_dtype)
# Define ops for different implementations
op1dn = spn.utils.scatter_cols(p1d, indices, num_out_cols)
op2d1n = spn.utils.scatter_cols(p2d1, indices, num_out_cols)
op2d2n = spn.utils.scatter_cols(p2d2, indices, num_out_cols)
# Run
out1dn = sess.run(op1dn)
out2d1n = sess.run(op2d1n)
out2d2n = sess.run(op2d2n)
# Compare
np.testing.assert_array_almost_equal(out1dn, true_output)
self.assertEqual(params_dtype.as_numpy_dtype, out1dn.dtype)
true_output_2d1 = [np.array(true_output)]
true_output_2d2 = [np.array(true_output),
np.array(true_output) * 2,
np.array(true_output) * 3]
np.testing.assert_array_almost_equal(out2d1n, true_output_2d1)
np.testing.assert_array_almost_equal(out2d2n, true_output_2d2)
self.assertEqual(params_dtype.as_numpy_dtype, out2d1n.dtype)
self.assertEqual(params_dtype.as_numpy_dtype, out2d2n.dtype)
def test_all_dtypes(params, indices, num_out_cols, true_output):
# CPU
test(params, indices, num_out_cols, true_output,
tf.float32, np.int32, False)
test(params, indices, num_out_cols, true_output,
tf.float32, np.int64, False)
test(params, indices, num_out_cols, true_output,
tf.float64, np.int32, False)
test(params, indices, num_out_cols, true_output,
tf.float64, np.int64, False)
# GPU
test(params, indices, num_out_cols, true_output,
tf.float32, np.int32, True)
test(params, indices, num_out_cols, true_output,
tf.float32, np.int64, True)
test(params, indices, num_out_cols, true_output,
tf.float64, np.int32, True)
test(params, indices, num_out_cols, true_output,
tf.float64, np.int64, True)
# Single column input, single column output
test_all_dtypes([10],
[0],
1,
[10.0])
# Multi-column output, single-column input
test_all_dtypes([10],
[1],
4,
[0.0, 10.0, 0.0, 0.0])
# Multi-column output, multi-column input
test_all_dtypes([10, 11, 12],
[1, 3, 0],
4,
[12.0, 10.0, 0.0, 11.0])
# Pass through if scattering to a single column
t = tf.constant([10])
out = spn.utils.scatter_cols(t, [0], 1)
self.assertIs(out, t)
t = tf.constant([[10],
[11]])
out = spn.utils.scatter_cols(t, [0], 1)
self.assertIs(out, t)
# Pass through if scattering to the output of same size
# in original index order
t = tf.constant([10, 11, 12])
out = spn.utils.scatter_cols(t, [0, 1, 2], 3)
self.assertIs(out, t)
t = tf.constant([[10, 11, 12],
[13, 14, 15]])
out = spn.utils.scatter_cols(t, [0, 1, 2], 3)
self.assertIs(out, t)
def test_scatter_values(self):
def test(params, indices, num_out_cols, param_dtype, ind_dtype,
true_output, use_gpu=False):
if use_gpu:
device = [False, True]
else:
device = [False]
for p_dt in param_dtype:
for i_dt in ind_dtype:
for dev in device:
with self.test_session(use_gpu=dev) as sess:
row1 = 1
row2 = -1
row3 = 2
# Convert params and output to appropriate data-types
if p_dt == tf.float32 or p_dt == tf.float64:
par = list(map(float, params))
if isinstance(true_output[0], collections.Iterable):
t_out = [list(map(float, to)) for to in
true_output]
else:
t_out = list(map(float, true_output))
else:
par = list(map(int, params))
if isinstance(true_output[0], collections.Iterable):
t_out = [list(map(int, to)) for to in
true_output]
else:
t_out = list(map(int, true_output))
p1d = tf.constant(np.array(par), dtype=p_dt)
p2d1 = tf.constant(np.array([np.array(par)]),
dtype=p_dt)
p2d2 = tf.constant(np.array([np.array(par) * row1,
np.array(par) * row2,
np.array(par) * row3]),
dtype=p_dt)
ind1d = tf.constant(np.array(indices), dtype=i_dt)
ind2d1 = tf.constant(np.array([np.array(indices)]),
dtype=i_dt)
ind2d2 = tf.constant(np.array([np.array(indices)] * 3), dtype=i_dt)  # assumed: three identical index rows to pair with the three rows of p2d2
import numpy as np
import time
from inspect import signature
def nparams(f):
return len(signature(f).parameters)
def sign(x):
if x >= 0:
return 1.0
else:
return -1.0
def FindMinima(fn, gradfn, eta=0.1, threshold=1e-3):
# Determine the number of arguments to the gradient.
n_params = len(signature(gradfn).parameters)
# Create a randomized starting position.
position = (np.random.rand(n_params) - 0.5) * 20
gradient = np.ones(n_params)
while True:
# Calculate the gradient.
gradient = gradfn(*position)
#print(gradient)
# Check for zero gradient.
closeness = np.abs(gradient).max()
if closeness < threshold:
break
# Move each coordinate based on the gradient.
cofactor = ((closeness**4) / (1 + closeness**4)) + (eta / 10)
for c in range(n_params):
position[c] -= eta * sign(gradient[c]) * cofactor
#position[c] -= eta * gradient[c] * cofactor
# We may have reached a minimum. Return this position.
return position
def Minimize(fn, gradfn, std_threshold=0.01, n_best=10):
# Determine the number of arguments to the gradient.
n_params = len(signature(gradfn).parameters)
positions = []
minima = []
# Find enough minima to start looking for convergence.
for i in range(n_best):
pos = FindMinima(fn, gradfn)
val = fn(*pos)[0]
minima.append(val)
positions.append(pos)
minima.sort()
while np.array(minima[:n_best]).std() > std_threshold:
print(len(minima))
pos = FindMinima(fn, gradfn)
val = fn(*pos)[0]
minima.append(val)
print(val)
positions.append(pos)
minima.sort()
minidx = np.array(minima).argmin()
minimum = minima[minidx]
minpos = positions[minidx]
return minimum, minpos
def f(x, y, z):
return [-np.exp(-((x / 5)**2 + (y / 5)**2 + (z / 5)**2))*(np.sin(x) + np.sin(y) + np.sin(z))]  # z term assumed by symmetry
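# Example (sketch, hypothetical gradient): Minimize requires the analytic
# gradient of f, e.g.
#   def gradf(x, y, z):
#       ...  # return array-like [df/dx, df/dy, df/dz]
#   minimum, minpos = Minimize(f, gradf)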
# ============================================================================
# Chapter 7: Hot Water Supply
# Section 1: Hot Water Supply Systems
# Ver. 18 (Energy Consumption Performance Calculation Program (Residential) Ver. 02.05 and later)
# ============================================================================
import numpy as np
from functools import lru_cache
import pyhees.section7_1_b as default
import pyhees.section7_1_c as gas
import pyhees.section7_1_d as oil
import pyhees.section7_1_e as eheatpump
import pyhees.section7_1_f as eheater
import pyhees.section7_1_g as hybrid_gas
import pyhees.section7_1_g_3 as hybrid_gas_3
import pyhees.section7_1_h as gas_hybrid
import pyhees.section7_1_i as whybrid
import pyhees.section7_1_j as watersaving
import pyhees.section7_1_m as schedule
import pyhees.section9_2 as lss
import pyhees.section9_3 as ass
from pyhees.section11_1 import load_outdoor, get_Theta_ex
from pyhees.section11_2 import load_solrad
from pyhees.section11_3 import load_schedule, get_schedule_hw
# ============================================================================
# 5. Energy consumption of the hot water supply system
# ============================================================================
# ============================================================================
# 5.1 Electric power consumption
# ============================================================================
@lru_cache()
def calc_hotwater_load(n_p, region, sol_region, has_bath, bath_function, pipe_diameter, kitchen_watersaving_A,
kitchen_watersaving_C, shower_watersaving_A, shower_watersaving_B, washbowl_watersaving_C,
bath_insulation,
type=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None, W_tnk_ss=None,
hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None, V_fan_P0=None,
d0=None, d1=None, m_fan_test=None, W_tnk_ass=None
):
"""給湯負荷の計算
Args:
n_p(float): 仮想居住人数 (人)
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
has_bath(bool): 浴室等の有無
bath_function(str): ふろ機能の種類
pipe_diameter(str): ヘッダー分岐後の径
kitchen_watersaving_A(bool): 台所水栓の手元止水機能の有無
kitchen_watersaving_C(bool): 台所水栓の水優先吐水機能の有無
shower_watersaving_A(bool): 浴室シャワー水栓の手元止水機能の有無
shower_watersaving_B(bool): 浴室シャワー水栓の小流量吐水機能の有無
washbowl_watersaving_C(bool): 洗面水栓の水優先吐水機能の有無
bath_insulation(bool): 浴槽の断熱の有無
type(str, optional): 太陽熱利用設備の種類 (液体集熱式,空気集熱式,None) (Default value = None)
ls_type(str, optional): 液体集熱式太陽熱利用設備の種類 (太陽熱温水器,ソーラーシステム) (Default value = None)
A_sp(float, optional): 太陽熱集熱部の有効集熱面積 (m2) (Default value = None)
P_alpha_sp(float, optional): 太陽熱集熱部の方位角 (°) (Default value = None)
P_beta_sp(float, optional): 太陽熱集熱部の傾斜角 (°) (Default value = None)
W_tnk_ss(float, optional): ソーラーシステムのタンク容量 (L) (Default value = None)
hotwater_use(bool, optional): 空気集熱式太陽熱利用設備が給湯部を有する場合はTrue (Default value = None)
heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
A_col(tuple, optional): 集熱器群の面積 (m2) (Default value = None)
P_alpha(float, optional): 方位角 (°) (Default value = None)
P_beta(float, optional): 傾斜角 (°) (Default value = None)
V_fan_P0(float, optional): 空気搬送ファンの送風機特性曲線において機外静圧をゼロとしたときの空気搬送ファンの風量 (m3/h) (Default value = None)
d0(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の切片 (-) (Default value = None)
d1(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の傾き (W/(m2K)) (Default value = None)
m_fan_test(tuple, optional): 集熱器群を構成する集熱器の集熱性能試験時における単位面積当たりの空気の質量流量 (kg/(s・m2)) (Default value = None)
W_tnk_ass(float, optional): タンク容量 (L) (Default value = None)
    Returns:
        dict: 1時間当たりの給湯設備の給湯負荷等をまとめた辞書
"""
# 生活スケジュール
schedule = load_schedule()
schedule_hw = get_schedule_hw(schedule)
# 外部環境
outdoor = load_outdoor()
Theta_ex_d_t = get_Theta_ex(region, outdoor)
# ----- 14. 夜間平均外気温度 -----
# 夜間平均外気温度 (℃) (15)
Theta_ex_Nave_d = get_Theta_ex_Nave_d(Theta_ex_d_t)
# ----- 13. 日平均外気温度 -----
# 日平均外気温度 (℃) (14)
theta_ex_d_Ave_d = get_theta_ex_d_Ave_d(Theta_ex_d_t)
# ----- 12. 日平均給水温度 -----
# 期間平均外気温度 (℃) (13)
Theta_ex_prd_Ave_d = get_Theta_ex_prd_Ave_d(theta_ex_d_Ave_d)
# 日平均給水温度 (℃) (12)
Theta_wtr_d = get_Theta_wtr_d(region, Theta_ex_prd_Ave_d)
# ----- 11. 浴槽沸かし直しによる給湯熱負荷 -----
# 浴槽沸かし直しによる給湯熱負荷 (MJ/h) (10)
L_ba_d_t = calc_L_ba_d_t(bath_insulation, schedule_hw, has_bath, theta_ex_d_Ave_d, n_p)
# ----- 10. 基準給湯量 -----
# 基準給湯量 (L/h) (7)
W_k_d_t = calc_W_k_d_t(n_p, schedule_hw)
W_s_d_t = calc_W_s_d_t(n_p, schedule_hw, has_bath)
W_w_d_t = calc_W_w_d_t(n_p, schedule_hw)
W_b1_d_t = calc_W_b1_d_t(n_p, schedule_hw, has_bath, bath_function)
W_b2_d_t = calc_W_b2_d_t(n_p, schedule_hw, has_bath, bath_function)
# 浴槽水栓さし湯時における基準給湯量 (L/h) (9)
W_ba1_d_t = calc_W_ba1_d_t(bath_function, L_ba_d_t, Theta_wtr_d)
# ----- 9. 節湯補正給湯量 -----
# 節湯補正給湯量 (L/h) (6)
W_dash_k_d_t = calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d)
W_dash_s_d_t = calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter)
W_dash_w_d_t = calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d)
W_dash_b1_d_t = calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter)
W_dash_b2_d_t = calc_W_dash_b2_d_t(W_b2_d_t)
W_dash_ba1_d_t = calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter)
# ----- 8. 節湯補正給湯熱負荷 -----
# 基準給湯温度 (℃)
Theta_sw_k = get_Theta_sw_k()
Theta_sw_s = get_Theta_sw_s()
Theta_sw_w = get_Theta_sw_w()
# 節湯補正給湯熱負荷 (MJ/h) (5)
L_dash_k_d_t = get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d)
L_dash_s_d_t = get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d)
L_dash_w_d_t = get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d)
L_dash_b1_d_t, L_dash_b2_d_t = get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bath_function)
L_dash_ba1_d_t, L_dash_ba2_d_t = get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bath_function)
# ----- 7. 太陽熱補正給湯熱負荷 -----
# 太陽熱利用給湯設備による補正集熱量
L_sun_d_t = calc_L_sun_d_t(
region=region,
sol_region=sol_region,
solar_device=type,
ls_type=ls_type,
A_sp=A_sp,
P_alpha_sp=P_alpha_sp,
P_beta_sp=P_beta_sp,
W_tnk_ss=W_tnk_ss,
hotwater_use=hotwater_use,
heating_flag_d=heating_flag_d,
A_col=A_col,
P_alpha=P_alpha,
P_beta=P_beta,
V_fan_P0=V_fan_P0,
d0=d0,
d1=d1,
m_fan_test=m_fan_test,
W_tnk_ass=W_tnk_ass,
Theta_wtr_d=Theta_wtr_d,
L_dash_k_d_t=L_dash_k_d_t,
L_dash_s_d_t=L_dash_s_d_t,
L_dash_w_d_t=L_dash_w_d_t,
L_dash_b1_d_t=L_dash_b1_d_t,
L_dash_b2_d_t=L_dash_b2_d_t,
L_dash_ba1_d_t=L_dash_ba1_d_t
)
# 太陽熱補正給湯熱負荷
L_dashdash_k_d_t = calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_s_d_t = calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_w_d_t = calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_b1_d_t = calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_b2_d_t = calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_ba1_d_t = calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_ba2_d_t = get_L_dashdash_ba2_d_t(L_dash_ba2_d_t)
print('L_ba = {}'.format(np.sum(L_ba_d_t)))
print('W_k = {}'.format(np.sum(W_k_d_t)))
print('W_s = {}'.format(np.sum(W_s_d_t)))
print('W_w = {}'.format(np.sum(W_w_d_t)))
print('W_b1 = {}'.format(np.sum(W_b1_d_t)))
print('W_b2 = {}'.format(np.sum(W_b2_d_t)))
print('W_ba1 = {}'.format(np.sum(W_ba1_d_t)))
print('W_dash_k = {}'.format(np.sum(W_dash_k_d_t)))
print('W_dash_s = {}'.format(np.sum(W_dash_s_d_t)))
print('W_dash_w = {}'.format(np.sum(W_dash_w_d_t)))
print('W_dash_b1 = {}'.format(np.sum(W_dash_b1_d_t)))
print('W_dash_b2 = {}'.format(np.sum(W_dash_b2_d_t)))
print('W_dash_ba1 = {}'.format(np.sum(W_dash_ba1_d_t)))
print('L_dash_k = {}'.format(np.sum(L_dash_k_d_t)))
print('L_dash_s = {}'.format(np.sum(L_dash_s_d_t)))
print('L_dash_w = {}'.format(np.sum(L_dash_w_d_t)))
print('L_dash_b1 = {}'.format(np.sum(L_dash_b1_d_t)))
print('L_dash_b2 = {}'.format(np.sum(L_dash_b2_d_t)))
print('L_dash_ba1 = {}'.format(np.sum(L_dash_ba1_d_t)))
print('L_dash_ba2 = {}'.format(np.sum(L_dash_ba2_d_t)))
print('L_dashdash_k = {}'.format(np.sum(L_dashdash_k_d_t)))
print('L_dashdash_s = {}'.format(np.sum(L_dashdash_s_d_t)))
print('L_dashdash_w = {}'.format(np.sum(L_dashdash_w_d_t)))
print('L_dashdash_b1 = {}'.format(np.sum(L_dashdash_b1_d_t)))
print('L_dashdash_b2 = {}'.format(np.sum(L_dashdash_b2_d_t)))
print('L_dashdash_ba1 = {}'.format(np.sum(L_dashdash_ba1_d_t)))
print('L_dashdash_ba2 = {}'.format(np.sum(L_dashdash_ba2_d_t)))
return {
'L_dash_k_d_t': L_dash_k_d_t,
'L_dash_s_d_t': L_dash_s_d_t,
'L_dash_w_d_t': L_dash_w_d_t,
'L_dash_b1_d_t': L_dash_b1_d_t,
'L_dash_b2_d_t': L_dash_b2_d_t,
'L_dash_ba1_d_t': L_dash_ba1_d_t,
'L_dash_ba2_d_t': L_dash_ba2_d_t,
'L_dashdash_k_d_t': L_dashdash_k_d_t,
'L_dashdash_s_d_t': L_dashdash_s_d_t,
'L_dashdash_w_d_t': L_dashdash_w_d_t,
'L_dashdash_b1_d_t': L_dashdash_b1_d_t,
'L_dashdash_b2_d_t': L_dashdash_b2_d_t,
'L_dashdash_ba1_d_t': L_dashdash_ba1_d_t,
'L_dashdash_ba2_d_t': L_dashdash_ba2_d_t,
'W_dash_k_d_t': W_dash_k_d_t,
'W_dash_s_d_t': W_dash_s_d_t,
'W_dash_w_d_t': W_dash_w_d_t,
'W_dash_b1_d_t': W_dash_b1_d_t,
'W_dash_b2_d_t': W_dash_b2_d_t,
'W_dash_ba1_d_t': W_dash_ba1_d_t,
'theta_ex_d_Ave_d': theta_ex_d_Ave_d,
'Theta_ex_Nave_d': Theta_ex_Nave_d
}
def calc_E_E_W_d_t(n_p, L_HWH, heating_flag_d, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備の消費電力量 (1)
Args:
n_p(float): 仮想居住人数 (人)
L_HWH(ndarray): 温水暖房用熱源機の熱負荷
heating_flag_d(ndarray): 暖房日
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
Returns:
        ndarray: 1時間当たりの給湯設備の消費電力量 (kWh/h)
"""
if HW is None or HW['hw_type'] is None:
        # 台所、洗面所及び浴室等がいずれも無い場合は0とする
return np.zeros(24 * 365)
if HW['hw_type'] == 'コージェネレーションを使用する':
return np.zeros(24 * 365)
# ふろ機能の修正
bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
# 給湯負荷の生成
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
# 1時間当たりの給湯機の消費電力量 (kWh/h)
E_E_hs_d_t = calc_E_E_hs_d_t(
hw_type=HW['hw_type'],
bath_function=bath_function,
hybrid_category=HW['hybrid_category'],
package_id=HW.get('package_id'),
hybrid_param=HW.get('hybrid_param'),
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d'],
Theta_ex_Nave_d=hotwater_load['Theta_ex_Nave_d'],
L_HWH=L_HWH,
CO2HP=HW['CO2HP'] if 'CO2HP' in HW else None
)
# 太陽利用設備の補機の消費電力量
E_E_aux_ss_d_t = calc_E_E_aux_ss_d_t(
SHC=SHC,
region=region,
sol_region=sol_region,
heating_flag_d=heating_flag_d
)
# 1時間当たりの給湯設備の消費電力量(1)
E_E_W_d_t = E_E_hs_d_t + E_E_aux_ss_d_t
return E_E_W_d_t
def calc_E_E_aux_ss_d_t(SHC, region=None, sol_region=None, heating_flag_d=None):
"""1時間当たりの補機の消費電力量 (kWh/h)
Args:
SHC(dict): 太陽熱利用設備の仕様
region(int, optional): 省エネルギー地域区分 (Default value = None)
sol_region(int, optional): 年間の日射地域区分 (Default value = None)
heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
Returns:
ndarray: 1時間当たりの補機の消費電力量 (kWh/h)
"""
if SHC is None:
return np.zeros(24 * 365)
elif SHC['type'] == '液体集熱式':
# 第九章「自然エネルギー利用設備」第二節「液体集熱式太陽熱利用設備」の算定方法により定まる
# 1時間当たりの補機の消費電力量 (kWh/h)
return lss.calc_E_E_lss_aux_d_t(
ls_type=SHC['ls_type'],
pmp_type='上記以外の機種',
P_alpha_sp=SHC['P_alpha_sp'],
P_beta_sp=SHC['P_beta_sp'],
region=region,
sol_region=sol_region
)
elif SHC['type'] == '空気集熱式':
# 第九章「自然エネルギー利用設備」第三節「空気集熱式太陽熱利用設備」の算定方法により定まる
# 1時間当たりの補機の消費電力量のうちの給湯設備への付加分 (kWh/h)
return ass.calc_E_E_W_aux_ass_d_t(
hotwater_use=SHC['hotwater_use'],
heating_flag_d=heating_flag_d,
region=region,
sol_region=sol_region,
P_alpha=SHC['P_alpha'],
P_beta=SHC['P_beta'],
A_col=SHC['A_col'],
V_fan_P0=SHC['V_fan_P0'],
m_fan_test=SHC['m_fan_test'],
d0=SHC['d0'],
d1=SHC['d1'],
fan_sso=SHC['fan_sso'],
fan_type=SHC['fan_type'],
pump_sso=SHC['pump_sso']
)
else:
raise ValueError(SHC['type'])
# ============================================================================
# 5.2 Gas consumption
# ============================================================================
def calc_E_G_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備のガス消費量 (MJ/h) (2)
Args:
n_p(float): 仮想居住人数
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
A_A(float): 床面積の合計[m^2]
region(int): 地域区分
sol_region(int): 年間の日射地域区分
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
        heating_flag_d(ndarray): 暖房日
Returns:
ndarray: 1時間当たりの給湯設備のガス消費量 (MJ/h)
"""
if HW is None or HW['hw_type'] is None:
        # 台所、洗面所及び浴室等がいずれも無い場合は0とする
return np.zeros(24 * 365)
# ふろ機能の修正
bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
# 給湯負荷の生成
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
    # 1時間当たりの給湯機のガス消費量 (MJ/h)
E_G_hs_d = calc_E_G_hs_d(
hw_type=HW['hw_type'],
hybrid_category=HW['hybrid_category'],
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
bath_function=bath_function,
package_id=HW.get('package_id'),
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
Theta_ex_Ave=hotwater_load['theta_ex_d_Ave_d'],
Theta_ex_Nave=hotwater_load['Theta_ex_Nave_d'],
L_HWH=L_HWH,
hybrid_param=HW.get('hybrid_param')
)
return E_G_hs_d
# ============================================================================
# 5.3 Kerosene consumption
# ============================================================================
def calc_E_K_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
Args:
n_p(float): 仮想居住人数
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
A_A(float): 床面積の合計[m^2]
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
        heating_flag_d(ndarray): 暖房日
Returns:
ndarray: 1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
"""
if HW is None or HW['hw_type'] is None:
        # 台所、洗面所及び浴室等がいずれも無い場合は0とする
return np.zeros(24 * 365)
# ふろ機能の修正
bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
# 給湯負荷の生成
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
# 1時間当たりの給湯機の灯油消費量 (MJ/h)
E_k_hs_d_t = calc_E_K_hs_d_t(
hw_type=HW['hw_type'],
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
bath_function=bath_function,
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d']
)
return E_k_hs_d_t
# ============================================================================
# 5.4 Primary energy consumption of other fuels
# ============================================================================
def get_E_M_W_d_t():
"""1時間当たりの給湯設備のその他の燃料による一次エネルギー消費量
Args:
Returns:
ndarray: 1時間当たりの給湯設備のその他の燃料による一次エネルギー消費量
"""
# 1時間当たりの給湯設備のその他の燃料による一次エネルギー消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 6. Energy consumption of water heaters
# ============================================================================
def calc_E_E_hs_d_t(hw_type, bath_function, package_id, hybrid_param, hybrid_category, e_rtd, e_dash_rtd, Theta_ex_Nave_d, W_dash_k_d_t, W_dash_s_d_t,
W_dash_w_d_t,
W_dash_b1_d_t,
W_dash_b2_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t,
L_dashdash_b1_d_t,
L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, L_HWH, CO2HP):
"""1時間当たりの給湯機の消費電力量 (kWh/h)
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
bath_function(str): 給湯機の種類
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
package_id(str): パッケージID
hybrid_param(dic): ハイブリッドパラメーター
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の 判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
Theta_ex_Nave_d(ndarray): 夜間平均外気温 (℃)
        W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
        W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
        W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
        W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
        W_dash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h)
        W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
        theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
        L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
CO2HP(dict): CO2HPのパラメーター
Returns:
        ndarray: 1時間当たりの給湯機の消費電力量 (kWh/h)
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return gas.calc_E_E_hs_d_t(
W_dash_k_d_t=W_dash_k_d_t,
W_dash_s_d_t=W_dash_s_d_t,
W_dash_w_d_t=W_dash_w_d_t,
W_dash_b1_d_t=W_dash_b1_d_t,
W_dash_b2_d_t=W_dash_b2_d_t,
W_dash_ba1_d_t=W_dash_ba1_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return oil.calc_E_E_hs_d_t(W_dash_k_d_t=W_dash_k_d_t, W_dash_s_d_t=W_dash_s_d_t, W_dash_w_d_t=W_dash_w_d_t,
W_dash_b1_d_t=W_dash_b1_d_t, W_dash_ba1_d_t=W_dash_ba1_d_t,
W_dash_b2_d_t=W_dash_b2_d_t, theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)
elif hw_type == '電気ヒートポンプ給湯機':
return eheatpump.calc_E_E_hs_d_t(
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
e_rtd=e_rtd,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
theta_ex_Nave_d=Theta_ex_Nave_d,
CO2HP=CO2HP
)
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return eheater.calc_E_E_hs_d_t(
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return hybrid_gas.calc_E_E_hs_d_t(
hybrid_category=hybrid_category,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
return hybrid_gas_3.calc_E_E_hs_d_t(
bath_function=bath_function,
package_id=package_id,
hybrid_param=hybrid_param,
W_dash_ba1_d_t=W_dash_ba1_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return gas_hybrid.get_E_E_hs(
W_dash_k_d_t=W_dash_k_d_t,
W_dash_s_d_t=W_dash_s_d_t,
W_dash_w_d_t=W_dash_w_d_t,
W_dash_b1_d_t=W_dash_b1_d_t,
W_dash_b2_d_t=W_dash_b2_d_t,
W_dash_ba1_d_t=W_dash_ba1_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return whybrid.calc_E_E_hs_d_t(
L_HWH=L_HWH,
hybrid_category=hybrid_category,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
else:
raise ValueError(hw_type)
def calc_E_G_hs_d(hw_type, hybrid_category, e_rtd, e_dash_rtd, bath_function, package_id, Theta_ex_Nave, W_dash_k_d_t, W_dash_s_d_t,
W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t, Theta_ex_Ave, L_dashdash_k_d_t,
L_dashdash_s_d_t, L_dashdash_w_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, L_HWH, hybrid_param):
"""1日当たりの給湯機のガス消費量
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の 判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
bath_function(str): ふろ機能の種類
Theta_ex_Nave(ndarray): 夜間平均外気温 (℃)
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
        W_dash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
Theta_ex_Ave(ndarray): 日平均外気温度 (℃)
        L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
        L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
        L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
        package_id(str): パッケージID
        hybrid_param(dict): ハイブリッドパラメーター
Returns:
ndarray: 1時間当たりの給湯機のガス消費量 (MJ/h)
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return gas.calc_E_G_hs_d_t(
hw_type=hw_type,
e_rtd=e_rtd,
e_dash_rtd=e_dash_rtd,
theta_ex_d_Ave_d=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
bath_function=bath_function
)
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return oil.get_E_G_hs_d_t()
elif hw_type == '電気ヒートポンプ給湯機':
return eheatpump.get_E_G_hs_d_t()
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return eheater.get_E_G_hs()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return hybrid_gas.calc_E_G_hs_d_t(
hybrid_category=hybrid_category,
theta_ex_d_Ave_d=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
return hybrid_gas_3.get_E_G_hs_d_t(
bath_function=bath_function,
package_id=package_id,
theta_ex_d_Ave_d=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
W_dash_ba1_d_t=W_dash_ba1_d_t,
hybrid_param=hybrid_param
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return gas_hybrid.get_E_G_hs(
Theta_ex_Ave=Theta_ex_Ave,
L_dashdash_k=L_dashdash_k_d_t,
L_dashdash_s=L_dashdash_s_d_t,
L_dashdash_w=L_dashdash_w_d_t,
L_dashdash_b1=L_dashdash_b1_d_t,
L_dashdash_b2=L_dashdash_b2_d_t,
L_dashdash_ba1=L_dashdash_ba1_d_t,
L_dashdash_ba2=L_dashdash_ba2_d_t,
bath_function=bath_function
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return whybrid.calc_E_G_hs_d_t(
L_HWH=L_HWH,
hybrid_category=hybrid_category,
Theta_ex_Ave=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == 'コージェネレーションを使用する':
        return np.zeros(24 * 365)  # 他の分岐と同じく時間別の系列を返す
else:
raise ValueError(hw_type)
def calc_E_K_hs_d_t(hw_type, e_rtd, e_dash_rtd, bath_function, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t,
L_dashdash_w_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の灯油消費量 (MJ/h)
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の 判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
bath_function(str): ふろ機能の種類
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の灯油消費量 (MJ/h)
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return gas.get_E_K_hs_d_t()
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return oil.calc_E_K_hs_d_t(
hw_type=hw_type,
bath_function=bath_function,
e_rtd=e_rtd,
e_dash_rtd=e_dash_rtd,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ給湯機':
return eheatpump.get_E_K_hs_d_t()
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return eheater.get_E_K_hs()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return gas_hybrid.get_E_K_hs()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
return hybrid_gas.get_E_K_hs_d_t()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return hybrid_gas.get_E_K_hs_d_t()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return whybrid.get_E_K_hs_d_t()
elif hw_type == 'コージェネレーションを使用する':
        return np.zeros(24 * 365)  # 他の分岐と同じく時間別の系列を返す
else:
raise ValueError(hw_type)
def get_normalized_bath_function(hw_type, bath_function):
"""表4 評価可能な給湯機/給湯温水暖房機の種類
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
bath_function(str): ふろ機能の種類
    Returns:
        str: 評価に用いるふろ機能の種類
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return bath_function
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return bath_function
elif hw_type == '電気ヒートポンプ給湯機':
return bath_function
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return bath_function
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return "ふろ給湯機(追焚あり)"
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return "ふろ給湯機(追焚あり)"
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return "ふろ給湯機(追焚あり)"
elif hw_type == 'コージェネレーションを使用する':
return bath_function
else:
raise ValueError(hw_type)
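# Illustrative note (added; not in the original source): hybrid heat-pump/gas
# water heaters are always evaluated as 'ふろ給湯機(追焚あり)' regardless of the
# declared bath function, e.g.
#   get_normalized_bath_function('電気ヒートポンプ・ガス併用型給湯機(仕様による)', '給湯単機能')
#   -> 'ふろ給湯機(追焚あり)'
# while conventional gas/oil/electric types return bath_function unchanged.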
# ============================================================================
# 7. Solar-corrected hot water heat load
# ============================================================================
def calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h) (4a)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_k_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_k_d_t[f] = L_dash_k_d_t[f] - L_sun_d_t[f] * (L_dash_k_d_t[f] / L_dash_d_t[f])
return L_dashdash_k_d_t
def calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h) (4b)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_s_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_s_d_t[f] = L_dash_s_d_t[f] - L_sun_d_t[f] * (L_dash_s_d_t[f] / L_dash_d_t[f])
return L_dashdash_s_d_t
def calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h) (4c)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_w_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_w_d_t[f] = L_dash_w_d_t[f] - L_sun_d_t[f] * (L_dash_w_d_t[f] / L_dash_d_t[f])
return L_dashdash_w_d_t
def calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h) (4d)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_b1_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_b1_d_t[f] = L_dash_b1_d_t[f] - L_sun_d_t[f] * (L_dash_b1_d_t[f] / L_dash_d_t[f])
return L_dashdash_b1_d_t
def calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/h) (4e)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/h)
"""
L_dashdash_b2_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_b2_d_t[f] = L_dash_b2_d_t[f] - L_sun_d_t[f] * (L_dash_b2_d_t[f] / L_dash_d_t[f])
return L_dashdash_b2_d_t
def calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h) (4f)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
        L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h)
"""
L_dashdash_ba1_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_ba1_d_t[f] = L_dash_ba1_d_t[f] - L_sun_d_t[f] * (L_dash_ba1_d_t[f] / L_dash_d_t[f])
return L_dashdash_ba1_d_t
def get_L_dashdash_ba2_d_t(L_dash_ba2_d_t):
"""1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h) (4g)
Args:
L_dash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯負荷 (MJ/h)
Returns:
1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h)
"""
return L_dash_ba2_d_t
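# The six calc_L_dashdash_*_d_t functions above all apply the same rule from
# formula (4): the solar contribution L_sun_d_t is allocated to each tap in
# proportion to its share of the total corrected load. A generic helper
# capturing that shared pattern (added for documentation; not part of the
# original source and not called by it):
def _apply_solar_correction(L_dash_x_d_t, L_dash_total_d_t, L_sun_d_t):
    L_dashdash_x_d_t = np.zeros(24 * 365)
    f = L_dash_total_d_t > 0
    L_dashdash_x_d_t[f] = L_dash_x_d_t[f] - L_sun_d_t[f] * (L_dash_x_d_t[f] / L_dash_total_d_t[f])
    return L_dashdash_x_d_t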
def calc_L_sun_d_t(region, sol_region=None, solar_device=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None,
W_tnk_ss=None, hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None,
V_fan_P0=None, d0=None,
d1=None, m_fan_test=None, W_tnk_ass=None, Theta_wtr_d=None, L_dash_k_d_t=None, L_dash_s_d_t=None,
L_dash_w_d_t=None, L_dash_b1_d_t=None, L_dash_b2_d_t=None, L_dash_ba1_d_t=None):
"""太陽熱利用給湯設備による補正集熱量
Args:
region(int): 省エネルギー地域区分
sol_region(int, optional): 年間の日射地域区分 (Default value = None)
solar_device(str, optional): 太陽熱利用設備の種類 (液体集熱式,空気集熱式,None) (Default value = None)
ls_type(str, optional): 液体集熱式太陽熱利用設備の種類 (太陽熱温水器,ソーラーシステム) (Default value = None)
A_sp(float, optional): 太陽熱集熱部の有効集熱面積 (m2) (Default value = None)
P_alpha_sp(float, optional): 太陽熱集熱部の方位角 (°) (Default value = None)
P_beta_sp(float, optional): 太陽熱集熱部の傾斜角 (°) (Default value = None)
W_tnk_ss(float, optional): ソーラーシステムのタンク容量 (L) (Default value = None)
W_tnk_ass(float, optional): タンク容量 (L) (Default value = None)
Theta_wtr_d(ndarray, optional): 日平均給水温度 (℃) (Default value = None)
        L_dash_k_d_t(ndarray, optional): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_s_d_t(ndarray, optional): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_w_d_t(ndarray, optional): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_b1_d_t(ndarray, optional): 1時間当たりの浴槽水栓湯はりにおける節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_b2_d_t(ndarray, optional): 1時間当たりの浴槽自動湯はりにおける節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_ba1_d_t(ndarray, optional): 1時間当たりの浴槽水栓さし湯における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
        hotwater_use(bool, optional): 空気集熱式太陽熱利用設備が給湯部を有する場合はTrue (Default value = None)
        heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
        A_col(tuple, optional): 集熱器群の面積 (m2) (Default value = None)
        P_alpha(float, optional): 方位角 (°) (Default value = None)
        P_beta(float, optional): 傾斜角 (°) (Default value = None)
        V_fan_P0(float, optional): 空気搬送ファンの送風機特性曲線において機外静圧をゼロとしたときの風量 (m3/h) (Default value = None)
        d0(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の切片 (-) (Default value = None)
        d1(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の傾き (W/(m2K)) (Default value = None)
        m_fan_test(tuple, optional): 集熱性能試験時における単位面積当たりの空気の質量流量 (kg/(s・m2)) (Default value = None)
Returns:
ndarray: 1時間当たりの太陽熱利用設備による補正集熱量 (MJ/h)
"""
if solar_device == '液体集熱式':
return lss.calc_L_sun_lss_d_t(
region=region,
sol_region=sol_region,
ls_type=ls_type,
A_sp=A_sp,
P_alpha_sp=P_alpha_sp,
P_beta_sp=P_beta_sp,
W_tnk_ss=W_tnk_ss,
Theta_wtr_d=Theta_wtr_d,
L_dash_k_d_t=L_dash_k_d_t,
L_dash_s_d_t=L_dash_s_d_t,
L_dash_w_d_t=L_dash_w_d_t,
L_dash_b1_d_t=L_dash_b1_d_t,
L_dash_b2_d_t=L_dash_b2_d_t,
L_dash_ba1_d_t=L_dash_ba1_d_t
)
elif solar_device == '空気集熱式':
if hotwater_use == True:
outdoor = load_outdoor()
Theta_ex_d_t = get_Theta_ex(region, outdoor)
Theta_col_nonopg_d_t, Theta_col_opg_d_t = ass.calc_Theta_col(A_col, P_alpha, P_beta, V_fan_P0, d0, d1,
m_fan_test, region, sol_region, Theta_ex_d_t)
t_fan_d_t = ass.get_t_fan_d_t(Theta_col_nonopg_d_t, Theta_col_opg_d_t)
t_cp_d_t = ass.get_t_cp_d_t(hotwater_use, t_fan_d_t, heating_flag_d)
V_fan_d_t = ass.get_V_fan_d_t(t_fan_d_t, V_fan_P0)
Q_col_d_t = ass.get_Q_col_d_t(V_fan_d_t, Theta_col_opg_d_t, Theta_ex_d_t)
Q_d = ass.calc_Q_d(Q_col_d_t, t_cp_d_t)
L_tnk_d = ass.calc_L_tnk_d(Q_d, W_tnk_ass, Theta_wtr_d)
return ass.calc_L_sun_ass_d_t(L_tnk_d, L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t,
L_dash_b2_d_t, L_dash_ba1_d_t)
else:
return np.zeros(24 * 365)
elif solar_device is None:
return np.zeros(24 * 365)
else:
raise ValueError(solar_device)
# ============================================================================
# 8. Water-saving-corrected hot water heat load
# ============================================================================
def get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d):
"""台所水栓における節湯補正給湯負荷 (MJ/h) (5a)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
        Theta_sw_k(int): 台所水栓における基準給湯温度 (℃)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 台所水栓における節湯補正給湯負荷 (MJ/h)
"""
return W_dash_k_d_t * (Theta_sw_k - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)
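# Quick arithmetic check of formula (5a) (added; illustrative only): drawing
# 10 L/h at an assumed 40℃ tap temperature against 15℃ supply water gives
# 10 * (40 - 15) * 4.186e-3 ≈ 1.05 MJ/h; the 4.186e-3 factor converts
# L·K (≒ kcal) to MJ.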
def get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d):
"""浴室シャワー水栓における節湯補正給湯負荷 (5b)
Args:
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h)
        Theta_sw_s(int): 浴室シャワー水栓における基準給湯温度 (℃)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 浴室シャワーにおける節湯補正給湯負荷 (MJ/h)
"""
return W_dash_s_d_t * (Theta_sw_s - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)
def get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d):
"""洗面水栓における節湯補正給湯負荷 (5c)
Args:
        W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
        Theta_sw_w(int): 洗面水栓における基準給湯温度 (℃)
        Theta_wtr_d(ndarray): 日平均給水温度 (℃)
    Returns:
        ndarray: 洗面水栓における節湯補正給湯負荷 (MJ/h)
"""
return W_dash_w_d_t * (Theta_sw_w - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)
def get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bath_function):
    """浴槽水栓湯はり時における節水補正給湯熱負荷 L_dash_b1_d, L_dash_b2_d
    Args:
        W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
        W_dash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h)
        Theta_wtr_d(ndarray): 日平均給水温度 (℃)
        has_bath(bool): 浴室等の有無
        bath_function(str): ふろ機能の種類
    Returns:
        ndarray: 浴槽水栓湯はり時・浴槽自動湯はり時における節水補正給湯熱負荷 (MJ/h)
    """
    if has_bath == False:
        L_dash_b1_d_t = np.zeros(24 * 365)  # (5-1d)
        L_dash_b2_d_t = np.zeros(24 * 365)  # (5-1e)
        return L_dash_b1_d_t, L_dash_b2_d_t
    elif bath_function == '給湯単機能':
        Theta_sw_b1 = get_Theta_sw_b1()
        L_dash_b1_d_t = W_dash_b1_d_t * (Theta_sw_b1 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)  # (5-2d)
        L_dash_b2_d_t = np.zeros(24 * 365)  # (5-2e)
        return L_dash_b1_d_t, L_dash_b2_d_t
    elif bath_function == 'ふろ給湯機(追焚あり)' or bath_function == 'ふろ給湯機(追焚なし)':
        Theta_sw_b2 = get_Theta_sw_b2()
        L_dash_b1_d_t = np.zeros(24 * 365)  # (5-3d)
        L_dash_b2_d_t = W_dash_b2_d_t * (Theta_sw_b2 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)  # (5-3e)
        return L_dash_b1_d_t, L_dash_b2_d_t
    else:
        raise ValueError(bath_function)
def get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bath_function):
    """浴槽水栓さし湯時における節水補正給湯熱負荷 L_dash_ba1_d, L_dash_ba2_d
    Args:
        W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
        Theta_wtr_d(ndarray): 日平均給水温度 (℃)
        L_ba_d_t(ndarray): 1時間当たりの浴槽沸かし直しによる給湯熱負荷 (MJ/h)
        has_bath(bool): 浴室等の有無
        bath_function(str): ふろ機能の種類 (給湯単機能,ふろ給湯機(追焚なし),ふろ給湯機(追焚あり))
    Returns:
        ndarray: 浴槽水栓さし湯時/浴槽追焚時における節水補正給湯熱負荷 (MJ/h)
    """
    if has_bath == False:
        L_dash_ba1_d_t = np.zeros(24 * 365)  # (5-1f)
        L_dash_ba2_d_t = np.zeros(24 * 365)  # (5-1g)
        return L_dash_ba1_d_t, L_dash_ba2_d_t
    # The remaining branches were truncated in the source; they are
    # reconstructed below to mirror get_L_dash_bx_d_t (assumption).
    elif bath_function == '給湯単機能' or bath_function == 'ふろ給湯機(追焚なし)':
        Theta_sw_ba1 = get_Theta_sw_ba1()
        L_dash_ba1_d_t = W_dash_ba1_d_t * (Theta_sw_ba1 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)  # (5-2f)
        L_dash_ba2_d_t = np.zeros(24 * 365)  # (5-2g)
        return L_dash_ba1_d_t, L_dash_ba2_d_t
    elif bath_function == 'ふろ給湯機(追焚あり)':
        L_dash_ba1_d_t = np.zeros(24 * 365)  # (5-3f)
        L_dash_ba2_d_t = 1.25 * L_ba_d_t  # (5-3g) the 1.25 reheat-loss factor is an assumption
        return L_dash_ba1_d_t, L_dash_ba2_d_t
    else:
        raise ValueError(bath_function)
import itertools
import json
import numpy as np
import scipy.stats as stats
import trilearn.graph.decomposable
import trilearn.graph.junction_tree as libj
import trilearn.auxiliary_functions as aux
from trilearn.distributions import dirichlet
def ll_complete_set_ratio(comp, alpha, counts, data, levels, cache):
""" The ratio of normalizing constants for a posterior Dirichlet
distribution defined ofer a complete set (clique or separator).
I(alpha + n) / I(alpha)
Args:
comp: Clique or separator.
alpha: Pseudo counts for each cell.
"""
if comp not in counts:
counts[comp] = aux.get_marg_counts(data, list(comp))
if comp not in cache:
nodes = list(comp)
c1 = dirichlet.log_norm_constant_multidim(counts[comp],
alpha,
levels[nodes])
c2 = dirichlet.log_norm_constant_multidim({},
alpha,
levels[nodes])
cache[comp] = c1 - c2
return cache[comp]
def log_likelihood_partial(cliques, separators, no_levels, cell_alpha, counts, data, levels, cache):
cliques_constants = 0.0
tot_no_cells = np.prod([l for l in no_levels])
for c in cliques:
# Setting constant alpha here
no_cells_outside = np.prod([l for i, l in
enumerate(no_levels) if
i not in c])
alpha = cell_alpha * no_cells_outside / tot_no_cells
cliques_constants += ll_complete_set_ratio(c, alpha, counts, data, levels, cache)
seps_constants = 0.0
for s in separators:
if s == frozenset({}):
continue
nu = len(separators[s])
# Setting alpha here
no_cells_outside = np.prod([l for i, l in
enumerate(no_levels) if
i not in s])
alpha = cell_alpha * no_cells_outside / tot_no_cells
seps_constants += nu * ll_complete_set_ratio(s, alpha, counts, data, levels, cache)
return cliques_constants - seps_constants
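# The value returned above is the log marginal likelihood of the decomposable
# model: sum over cliques C of log[I(alpha_C + n_C) / I(alpha_C)] minus, for
# each separator S with junction-tree multiplicity nu_S, nu_S times the same
# log ratio for S, where I(.) is the Dirichlet normalizing constant. The alpha
# handed to each complete set is scaled so that its marginal table carries a
# total pseudo count of cell_alpha, keeping the marginals mutually consistent.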
def sample_hyper_consistent_counts(graph, levels, constant_alpha):
"""
TODO
"""
junctiontree = trilearn.graph.decomposable.junction_tree(graph)
(C, S, H, A, R) = libj.peo(junctiontree)
parameters = {}
no_levels = np.array([len(l) for l in levels])
for i, clique in enumerate(C):
if i == 0:
nodes = list(clique)
no_cells = np.prod(no_levels[nodes])
alphas = [constant_alpha/no_cells] * no_cells
x = stats.dirichlet.rvs(alphas)
x.shape = tuple(no_levels[nodes])
parameters[clique] = x
else:
# Find clique that contains S[i]
cont_clique = None
for j in range(i):
if S[i] <= C[j]:
cont_clique = C[j]
break
(parameters[clique],
parameters[S[i]]) = hyperconsistent_cliques(cont_clique,
parameters[cont_clique],
clique,
levels,
constant_alpha)
return parameters
def sample_hyper_consistent_parameters(graph, constant_alpha, levels):
junctiontree = trilearn.graph.decomposable.junction_tree(graph)
(C, S, H, A, R) = libj.peo(junctiontree)
parameters = {}
no_levels = np.array([len(l) for l in levels])
for i, clique in enumerate(C):
if i == 0:
nodes = sorted(list(clique))
no_cells = np.prod(no_levels[nodes])
alphas = [constant_alpha/no_cells] * no_cells
x = stats.dirichlet.rvs(alphas) # assume that the corresponding variables are ordered
x.shape = tuple(no_levels[nodes])
parameters[clique] = x
else:
# Find a clique that contains S[i]
cont_clique = None
for j in range(i):
if S[i] < C[j]:
cont_clique = C[j]
break
#print str(clique) + " neighbor of " + str(cont_clique)
(parameters[clique],
parameters[S[i]]) = hyperconsistent_cliques(cont_clique,
parameters[cont_clique],
clique,
levels,
constant_alpha)
return parameters
def hyperconsistent_cliques(clique1, clique1_dist, clique2,
levels, constant_alpha):
""" Returns a distribution for clique2 that is hyper-consistent
with clique1_dist.
Args:
clique1 (set): A clique
clique1_dist (np.array): A distribution for clique1
clique2 (set): A clique
levels (np.array of lists): levels for all nodes in the full graph
"""
sep_list = sorted(list(clique1 & clique2)) # TODO: Bug, does not work if sorting this for some reason
clique1_list = sorted(list(clique1))
clique2_list = sorted(list(clique2))
no_levels = np.array([len(l) for l in levels])
clique2_dist_shape = tuple(no_levels[clique2_list])
sep_dist_shape = tuple(no_levels[sep_list])
    clique2_dist = np.zeros(np.prod(no_levels[clique2_list]))
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from c0101_retrieve_clinical import retrieve_clinical
from c0201_query_patents import query_patents
def chart_patents():
"""
"""
query_patents()
# clinical_gov_url = 'https://clinicaltrials.gov/ct2/results?cond=&term=&type=&rslt=&age_v=&gndr=&intr=allogenic+AND+msc&titles=&outc=&spons=&lead=&id=&cntry=&state=&city=&dist=&locn=&rsub=&strd_s=&strd_e=&prcd_s=&prcd_e=&sfpd_s=&sfpd_e=&rfpd_s=&rfpd_e=&lupd_s=&lupd_e=&sort='
# retrieve_clinical(clinical_gov_url)
ref_path = os.path.join( 'metadata')
alloFile = 'allogenicANDmesencymalClinicalGov.csv'
autoFile = 'autologousANDmesencymalClinicalGov.csv'
fig = plt.figure()
ax = plt.subplot(111)
    df_return = count_per_year(alloFile)
    plt.scatter(df_return['year'], df_return['count'], color=[1, 0, 0])
    plt.plot(df_return['year'], df_return['count'], color=[1, 0, 0], label='allogenic')
    df_return = count_per_year(autoFile)
    plt.scatter(df_return['year'], df_return['count'], color=[0, 0, 1])
    plt.plot(df_return['year'], df_return['count'], color=[0, 0, 1], label='autologous')
ax.legend(loc = 'center left')
plt.title('Clinical Trials of MSC')
plt.savefig('patents.png', bbox_inches='tight')
def count_per_year(refFile):
"""
"""
ref_path = os.path.join( 'metadata')
ref_file = os.path.join(ref_path, refFile)
dfAllo = pd.read_csv(ref_file)
startAllo = list(dfAllo["Start Date"])
years = []
for start in startAllo:
start = str(start)
fullDate = start.split(' ')
year = fullDate[-1]
years.append(year)
dfAllo['Start Year'] = years
# print(years)
unique_years, unique_counts = [], []
    for year in np.arange(2000, 2025, 1):
        # the source was truncated inside this loop; the body below is a
        # reconstruction based on how chart_patents consumes the result
        unique_years.append(year)
        unique_counts.append(years.count(str(year)))
    df_return = pd.DataFrame({'year': unique_years, 'count': unique_counts})
    return df_return
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 16:36:49 2020
@author: hanshengjiang
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.optimize
import time
# change plot fonts
rc = {"font.family" : "serif",
"mathtext.fontset" : "stix",
"font.size": 16}
plt.rcParams.update(rc)
plt.rcParams["font.serif"] = ["Times New Roman"] + plt.rcParams["font.serif"]
from scipy import integrate
def u(r,p,coefficients):
'''
utility function
p: price
r: reference price
a,b,c_pos,c_neg all >=0
'''
(a ,b, c_pos, c_neg) = coefficients
u = a - b * p + c_pos * np.maximum(r - p, 0) + c_neg * np.minimum(r - p, 0)
return u
def R_single(a,b,c_pos,c_neg,r,p):
'''
r: reference price
p: price
a, b, c_pos, c_neg: coefficients
'''
u = a - b * p + c_pos * np.maximum(r - p, 0) + c_neg * np.minimum(r - p, 0)
if u > 100:
revenue = p
elif u < -100:
revenue = 0
else:
temp = 1/(1+np.exp(-u))
revenue = p * temp
return revenue
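# Sanity check (added; illustrative only): with a = 1, b = 1, c_pos = c_neg = 0
# and r = p = 1, the utility is u = 0, the purchase probability is
# 1 / (1 + exp(0)) = 0.5, so R_single(1, 1, 0, 0, 1, 1) == 0.5.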
def R_uniform(r,p,coefficients):
'''
one period revenue, logistic demand function
mixing distribution is a uniform distribution over [bL,bH]
[bL,bH] integratation range or parameter distribution range
p: price
r: reference price
'''
# need to set the integral accuracy lower
# --otherwise the intergal does not converge
BH = np.array(coefficients[3])[1]
(revenue_uniform, abserr) = integrate.nquad(R_single, coefficients,\
args = (r,p), opts = [{'epsabs': 1e-6}, {'epsabs': 1e-6}, {'epsabs': 1e-6}, {'epsabs': 1e-6},])
return revenue_uniform/(BH**4)
def R_uniform_fast(r,p,coefficients):
'''
one period revenue (approximated), logistic demand function
mixing distribution is a uniform distribution over [bL,bH]
[bL,bH] integratation range or parameter distribution range
p: price
r: reference price
coefficients: ( coordiates of the center, edge length of cube)
'''
# need to set the integral accuracy lower
# --otherwise the intergal does not converge
c = np.array(coefficients[0])
BH = float(coefficients[1])
# print(BH)
num_sample = 500
revenue_uniform = 0
for i in range(num_sample):
b = np.random.uniform(-BH/2,BH/2,4)
b = b + c
u = b[0] - b[1] * p + b[2] * np.maximum(r - p, 0) + b[3] * np.minimum(r - p, 0)
if u > 100:
revenue_uniform += p
elif u < -100:
revenue_uniform += 0
else:
temp = 1/(1+np.exp(-u))
revenue_uniform += p*temp
revenue_uniform = revenue_uniform/num_sample
# print(revenue_uniform)
return revenue_uniform
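# Note (added): R_uniform_fast replaces the deterministic nquad integration of
# R_uniform with plain Monte Carlo over the cube centered at c with edge BH;
# with num_sample = 500 draws the standard error of the estimate decays like
# 1/sqrt(num_sample), the accuracy/speed trade-off the "fast" variant makes.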
def R(r,p,coefficients,weights):
'''
one period revenue, logistic demand function
multi-segment consumers
p: price
r: reference price
'''
revenue = 0
coefficients = np.array(coefficients)
num_seg = int(len(coefficients)/4)
for i in range(num_seg):
# this number is set to be 26
if u(r,p,coefficients[4*i:4*(i+1)]) > 100:
# print('\n++++++++++++++++++++++++++')
# print('overflow encountered in exp(+inf)')
# print('++++++++++++++++++++++++++\n')
revenue += p * weights[i]
elif u(r,p,coefficients[4*i:4*(i+1)]) < -100:
revenue += 0
else:
revenue += p*np.exp(u(r,p,coefficients[4*i:4*(i+1)]))/(1+ np.exp(u(r,p,coefficients[4*i:4*(i+1)])))*weights[i]
return revenue
def R_ext(r,p,coefficients,weights):
'''
one period revenue, logistic demand function
multi-segment consumers
p: price
r: reference price
coefficients: includ both B and vB
'''
revenue = 0
coefficients = np.array(coefficients)
num_seg = int(len(coefficients)/8)
v = (1, -p, max(r-p,0), min(r-p,0))
eps = 1e-3
for i in range(num_seg):
if np.dot(v,coefficients[8*i+4:8*i+8]) > eps:
revenue += p * weights[i]
elif np.dot(v,coefficients[8*i+4:8*i+8]) < -eps:
revenue += 0
else:
revenue += p*weights[i]*np.exp(np.dot(v,coefficients[8*i:8*i+4]) -\
np.logaddexp(0, np.dot(v,coefficients[8*i:8*i+4])))
return revenue
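# Note (added): np.logaddexp(0, x) computes log(1 + e^x) without overflow, so
# exp(dot(v, b) - logaddexp(0, dot(v, b))) above is a numerically stable
# sigmoid(dot(v, b)). R_ext treats the second half of each 8-coefficient block
# (vB) as a hard accept/reject signal, short-circuiting to p or 0 when the
# corresponding utility leaves the [-eps, eps] band.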
def R_lin(r,p,coefficients,weights):
'''
one period revenue when the demand function is piece-wise linear
multi-segment consumers
****lower bounded by zero****
Input:
r reference price
p current price
coefficients 4*k coefficients for k segements in a sequential order
Output:
revenue from all segments
'''
revenue = 0
coefficients = np.array(coefficients)
#
num_seg = int(len(coefficients)/4)
for i in range(num_seg):
revenue += p*max(u(r,p,coefficients[4*i:4*(i+1)]),0)*weights[i]
return revenue
def D(r,p,coefficients,weights):
'''
one period demand, logistic demand function
multi-segment consumers
p: price
r: reference price
return
demand
'''
demand = 0
coefficients = np.array(coefficients)
num_seg = int(len(coefficients)/4)
for i in range(num_seg):
if u(r,p,coefficients[4*i:4*(i+1)]) >9000:
demand += 1 * weights[i]
elif u(r,p,coefficients[4*i:4*(i+1)]) <- 9000:
demand += 0
else:
demand += np.exp(u(r,p,coefficients[4*i:4*(i+1)]))/(1+ np.exp(u(r,p,coefficients[4*i:4*(i+1)])))*weights[i]
return demand
def D_lin(r,p,coefficients,weights):
'''
one period demand when the demand function is piece-wise linear
multi-segment consumers
****lower bounded by zero****
Input:
r reference price
p current price
coefficients 4*k coefficients for k segements in a sequential order
Output:
demand from all segments
'''
demand = 0
coefficients = np.array(coefficients)
#
num_seg = int(len(coefficients)/4)
for i in range(num_seg):
        # demand (not revenue): unlike R_lin, do not multiply by price p
        demand += max(u(r,p,coefficients[4*i:4*(i+1)]),0)*weights[i]
return demand
def non_decreasing(x):
dx = np.diff(x)
return np.all(dx >= 0)
def inf_hor_pricing_pricerange(L,H,theta,epsilon,T,gamma,coefficients,weights,func):
'''
Input:
L: lower bound of price
H: upper bound of price
theta: memory parameter of prices
epsilon: accuracy of price discretization
T: number of time periods
gamma: discounted factor
        coefficients: u = a - b * p + c_pos * np.maximum(r - p, 0) + c_neg * np.minimum(r - p, 0) utility model
Output:
V: V[i] = revenue for infinite horizon when the first (reference) price is price_list[i]
mu: mu[i] = optimal next reference price in the next time period given reference price is price_list[i]
'''
if T != np.inf:
raise ValueError("Must be infinite horizon!")
# decimals for rounding the value function
decimals_ = 100
price_list = np.arange(L-epsilon,H+2*epsilon,epsilon)
M = len(price_list)
V = np.zeros(M)
mu = np.zeros(M)
####### parameters that can be tuned
k = 1000 #number of iterations in policy evaluation, k could be any positive integer
num_iters = 100 # numer of outermost loop
converge_cnt = 0
start_time = time.time()
for count in range(num_iters):
# policy improvement
for i in range(M):
            V_candidate = np.zeros(M)
#!/bin/python3
import sys
import random
import numpy as np
sys.path.append('..')
from neural_network import NeuralNetwork
import time
from tkinter import *
tk = Tk()
widthSize = 500
heightSize = 500
frameRate = 60
frameSpeed = int(1 / frameRate * 1000)
canvas = Canvas(tk, width=widthSize, height=heightSize, background="black")
tk.title("Drawing_float")
canvas.pack()
inputLen = 2
hiddenLen = 18
outputLen = 1
learningRate = 0.1
n = NeuralNetwork(inputLen, hiddenLen, outputLen)
# With this structure, answer may not be predicted sometimes
# n = NeuralNetwork(2, 2, 1)
training_data = {
1: {'inputs': np.array([[0],[0]]), 'targets': np.array([[0]])},
2: {'inputs': np.array([[0],[1]]), 'targets': np.array([[1]])},
    3: {'inputs': np.array([[1],[0]]), 'targets': np.array([[1]])},
    # the source was truncated here; entry 4 completes the XOR truth table
    4: {'inputs': np.array([[1],[1]]), 'targets': np.array([[0]])}
}
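# Minimal training-loop sketch (added; assumes this toy NeuralNetwork exposes
# train(inputs, targets) and predict(inputs) -- check ../neural_network.py and
# adjust the method names if they differ):
# for _ in range(10000):
#     sample = training_data[random.randint(1, 4)]
#     n.train(sample['inputs'], sample['targets'])
# print(n.predict(np.array([[0], [1]])))  # should approach 1 for XOR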
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import sys
import os
import pickle as pickle
from six.moves import urllib
import tarfile
import scipy.stats.mstats
from load_cifar10 import load_data10
# training parameters
initial_learning_rate = 0.001
training_epochs = 200
batch_size = 128
# architecture parameters
n_labels = 10
crop_length = 32
n_channels = 3
image_width = 32
n_input = 32 * 32
mode = 'normal' # 'normal', 'mix', or 'fast'
nonlinearity_name = 'relu'
try:
num_to_make = int(sys.argv[1])
print('Number of foolers to generate:', num_to_make)
except:
print('Defaulted to making one fooling image')
num_to_make = 1
try:
mode = sys.argv[2] # 'normal', 'mix', or 'fast'
print('Chosen mode:', mode)
except:
print('Defaulted to normal mode since no mode given through command line')
mode = 'normal'
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(dtype=tf.float32, shape=[None, crop_length, crop_length, n_channels])
y = tf.placeholder(dtype=tf.int64, shape=[None])
is_training = tf.constant(False) # tf.placeholder(tf.bool)
W = {}
bn = {}
params = pickle.load(open("./r32.pkl", "rb"), encoding='latin1')
bn['beta0'] = tf.Variable(params[0])
bn['gamma0'] = tf.Variable(params[1])
bn['mu0'] = tf.constant(params[2])
bn['inv_std0'] = tf.constant(params[3])
for layer in range(1, 32):
# awkward offset because of bn for input
l_str = str(layer)
W['filter' + l_str] = tf.Variable(np.moveaxis(params[layer * 5 - 1], [0, 1, 2, 3], [3, 2, 0, 1]))
bn['beta' + l_str] = tf.Variable(params[layer * 5 + 0])
bn['gamma' + l_str] = tf.Variable(params[layer * 5 + 1])
bn['mu' + l_str] = tf.constant(params[layer * 5 + 2])
bn['inv_std' + l_str] = tf.constant(params[layer * 5 + 3])
W['w_out'] = tf.Variable(params[159])
W['b_out'] = tf.Variable(params[160])
def feedforward(_x, n=5):
rho = tf.nn.relu
def residual_block(h, layer_number=1, input_num_filters=32, increase_dim=False):
l_num = str(layer_number)
if increase_dim:
first_stride = [1, 2, 2, 1]
out_num_filters = input_num_filters * 2
else:
first_stride = [1, 1, 1, 1]
out_num_filters = input_num_filters
stack1 = rho((tf.nn.conv2d(h, W['filter' + l_num], strides=first_stride, padding='SAME') -
bn['mu' + l_num]) * bn['inv_std' + l_num] * bn['gamma' + l_num] + bn['beta' + l_num])
l_num = str(layer_number + 1)
stack2 = (tf.nn.conv2d(stack1, W['filter' + l_num], strides=[1, 1, 1, 1], padding='SAME') -
bn['mu' + l_num]) * bn['inv_std' + l_num] * bn['gamma' + l_num] + bn['beta' + l_num]
if increase_dim:
# upgrade tensorflow h[:, ::2, ::2, :]
# array_ops.strided_slice(h, [0,0,0,0], [2000,-1,-1,input_num_filters], [1,2,2,1])
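# (option-A style shortcut: downsample spatially via max_pool, then zero-pad
# the channel dimension so it matches the doubled filter count)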
h_squished = tf.nn.max_pool(h, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
padded = tf.pad(h_squished, [[0, 0], [0, 0], [0, 0], [out_num_filters // 4, out_num_filters // 4]])
block = rho(stack2 + padded)
else:
block = rho(stack2 + h)
return block
x_input = (_x - bn['mu0']) * bn['inv_std0'] * bn['gamma0'] + bn['beta0']
# bsize x 32 x 32 x 16
l = rho((tf.nn.conv2d(x_input, W['filter1'], strides=[1, 1, 1, 1], padding='SAME') -
bn['mu1']) * bn['inv_std1'] * bn['gamma1'] + bn['beta1'])
# bsize x 32 x 32 x 16
for i in range(n):
l = residual_block(l, layer_number=2 * i + 2)
# bsize x 16 x 16 x 32
l = residual_block(l, increase_dim=True, layer_number=2 * n + 2, input_num_filters=16)
for i in range(1, n):
l = residual_block(l, layer_number=2 * n + 2 * i + 2)
# bsize x 8 x 8 x 64
l = residual_block(l, increase_dim=True, layer_number=4 * n + 2, input_num_filters=32)
for i in range(1, n):
l = residual_block(l, layer_number=4 * n + 2 * i + 2)
l = tf.reduce_mean(l, reduction_indices=[1, 2])
return tf.matmul(l, W['w_out']) + W['b_out']
def normal(_x):
return feedforward(_x)
def energy_blur(_x):
_x = tf.reshape(_x, [-1, image_width, image_width, 3])
# 5x5, sigma = 0.7
filter = tf.reshape(tf.constant([[0.000252, 0.00352, 0.008344, 0.00352, 0.000252],
[0.00352, 0.049081, 0.11634, 0.049081, 0.00352],
[0.008344, 0.11634, 0.275768, 0.11634, 0.008344],
[0.00352, 0.049081, 0.11634, 0.049081, 0.00352],
[0.000252, 0.00352, 0.008344, 0.00352, 0.000252]],
dtype=tf.float32), [5, 5, 1, 1])
h, s, v = tf.split(3, 3, _x)
h = tf.nn.conv2d(tf.square(h), filter, strides=[1, 1, 1, 1], padding='SAME')
h = tf.sqrt(tf.reshape(h, [-1, 32, 32, 1]) + 1e-12)
s = tf.nn.conv2d(tf.square(s), filter, strides=[1, 1, 1, 1], padding='SAME')
s = tf.sqrt(tf.reshape(s, [-1, 32, 32, 1]) + 1e-12)
v = tf.nn.conv2d(tf.square(v), filter, strides=[1, 1, 1, 1], padding='SAME')
v = tf.sqrt(tf.reshape(v, [-1, 32, 32, 1]) + 1e-12)
_x = tf.concat(3, [h, s, v])
return feedforward(_x)
pred_normal = normal(x)
loss_normal = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(pred_normal, y))
pred_energy_blur = energy_blur(x)
loss_energy_blur = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(pred_energy_blur, y))
if mode == 'normal' or mode == 'fast':
pred = pred_normal
loss = loss_normal
elif mode == 'mix':
pred = (pred_normal + pred_energy_blur) / 2.
loss = loss_normal + loss_energy_blur
sess = tf.InteractiveSession(graph=graph)
tf.initialize_all_variables().run()
train_dataset, train_labels, test_dataset, test_labels = load_data10(randomize=False)
# mean_img = np.reshape(np.mean(train_dataset, 0), (32, 32, 3))
train_dataset = train_dataset.astype(np.float32)
test_dataset = test_dataset.astype(np.float32)
# pred = sess.run(pred, feed_dict={x: train_dataset[0:3000,:,:,:]})
# error = np.argmax(pred, 1) != np.argmax(train_labels[0:3000, :], 1)
# print(np.mean(error))
class_names = ['airplane', 'auto', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def show_image(image, rescale=False, add_mean=False):
img = image.copy()
img = img.reshape(32,32,3)
# if add_mean:
# img += mean_img
# if rescale:
# low, high = np.min(img), np.max(img)
# img = (img - low) / (high - low)
plt.imshow(img)
plt.gca().axis('off')
def make_fooling_image(image, target, reg=1e-3, step=1/255., max_iters=100, confidence_thresh=0.5):
# NOTE: we clip as a consequence of our discussion about improperly plotted images
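# Iteratively step the input down the loss gradient for the target class,
# with an L2 pull (reg) back toward the original image, stopping once the
# target-class softmax probability exceeds confidence_thresh.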
orig_image = image.copy() # paranoia
fooling_image = image.copy()
for _ in range(max_iters):
dFool, predictions = sess.run([tf.gradients(loss, x)[0], pred], feed_dict={x: fooling_image, y: [target]})
fooling_image[0] -= step * (np.squeeze(dFool[0]) + reg * (fooling_image[0] - orig_image[0]))
fooling_image[0] = np.clip(fooling_image[0], 0, 1)
fool_prob = sess.run(tf.nn.softmax(predictions)[0, target])
if fool_prob > confidence_thresh:
break
return fooling_image
def make_fooling_image_fast(image, target, reg=1e-3, step=10/255.):
# NOTE: we clip as a consequence of our discussion about improperly plotted images
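# Single-step, FGSM-style variant: one signed-gradient step of size `step`.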
orig_image = image.copy() # paranoia
fooling_image = image.copy()
dFool = sess.run(tf.gradients(loss, x)[0], feed_dict={x: fooling_image, y: [target]})
fooling_image[0] -= step * np.sign(np.squeeze(dFool[0]) + reg * (fooling_image[0] - orig_image[0]))
fooling_image[0] = np.clip(fooling_image[0], 0, 1)
return fooling_image
l1_distances = []
l2_distances = []
linf_distances = []
# examples = [i for i in range(300, 400)]
# labels = [i % 10 for i in range(300, 400)]
try:
history = pickle.load(open("./data/" + mode + "_foolers.p", "rb"))
except:
history = {}
if not os.path.exists('./data'):
os.makedirs('./data')
if not os.path.exists('./data/normal'):
os.makedirs('./data/normal')
if not os.path.exists('./data/mix'):
os.makedirs('./data/mix')
if not os.path.exists('./data/fast'):
os.makedirs('./data/fast')
for i in range(num_to_make):
# choose source image from which to generate a fooling image
rand_int = np.random.randint(10000, size=1)
import numpy as np
from numpy import linalg
import time
import sys
import math
import cmath
global pi
pi = np.pi
global sin
sin = np.sin
global cos
cos = np.cos
global asin
asin = np.arcsin
global acos
acos = np.arccos
global atan2
atan2 = np.arctan2
def asind(x):
temp_theta = asin(x.real)
return np.multiply(temp_theta,180.0/pi)
def acosd(x):
temp_theta = acos(x.real)
return np.multiply(temp_theta,180.0/pi)
def sind(x):
tempx = np.multiply(x,pi/180.0)
return sin(tempx)
def cosd(x):
tempx = np.multiply(x,pi/180.0)
return cos(tempx)
def tand(x):
tempx = np.multiply(x,pi/180.0)
return np.tan(tempx)
def atan2d(x,y):
temp_theta = atan2(x.real,y.real)
return np.multiply(temp_theta,180.0/pi)
if __name__ == '__main__':
import os
from glob import glob
from shutil import copy
import numpy as np
image_root = './Images/PSPT'
dir_dst = './Images/PSPT/DailyBest'
os.makedirs(dir_dst, exist_ok=True)
list_raw_paths = list()
list_grad_paths = list()
list_HMI = sorted(glob(os.path.join('./Images/HMI_100', '*.png')))
list_dir = sorted(os.listdir(image_root))
for dir in list_dir:
list_raw_paths.extend(sorted((glob(os.path.join(image_root, dir, 'Raw', '*.png')))))
list_grad_paths.extend(sorted((glob(os.path.join(image_root, dir, 'Gradient/2', '*.png')))))
list_dates = list()
for i in range(len(list_raw_paths)):
name = os.path.split(os.path.splitext(list_raw_paths[i])[0])[-1]
list_dates.append(name[:8])
tuple_dates = sorted(frozenset(list_dates))
for date in tuple_dates:
list_raw_same_date = list()
list_grads_same_date = list()
switch = False
for i, raw in enumerate(list_raw_paths):
if raw.find(date) != -1:
list_raw_same_date.append(list_raw_paths[i])
list_grads_same_date.append(np.fromfile(list_grad_paths[i]).sum())
switch = True
else:
if not switch:
continue
else:
break
np_grads_same_date = np.asarray(list_grads_same_date)
# coding: utf-8
# Copyright (c) 2021 AkaiKKRteam.
# Distributed under the terms of the Apache License, Version 2.0.
#!/bin/env python
from .Error import *
from .Unit import *
import sys
import numpy as np
from pymatgen.io.cif import CifParser
from pymatgen.core import Structure, PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import Element
import pandas as pd
from .ElementKkr import ElementKKR
_kkr_bohr = Unit().length_au2ang
if False:
try:
from pymatgen.symmetry import kpath
_use_kpath = False
except ImportError:
print("Warning: no kpath in pymatgen. kpath will be omitted.")
_use_kpath = False
class _BravaisKKR:
dict = {
1: "trc", # triclinic
2: "trc", # triclinic
3: "sm", # simple monoclinic
4: "sm", # simple monoclinic
5: "bsm", # base centered monoclinic
6: "sm", # simple monoclinic
7: "sm", # simple monoclinic
8: "bsm", # base centered monoclinic
9: "bsm", # base centered monoclinic
10: "sm", # simple monoclinic
11: "sm", # simple monoclinic
12: "bsm", # base centered monoclinic
13: "sm", # simple monoclinic
14: "sm", # simple monoclinic
15: "bsm", # base centered monoclinic
16: "so", # simple orthorhombic
17: "so", # simple orthorhombic
18: "so", # simple orthorhombic
19: "so", # simple orthorhombic
20: "bso", # base centered orthorhombic
21: "bso", # base centered orthorhombic
22: "fco", # face centered orthorhombic
23: "bco", # body centered orthorhombic
24: "bco", # body centered orthorhombic
25: "so", # simple orthorhombic
26: "so", # simple orthorhombic
27: "so", # simple orthorhombic
28: "so", # simple orthorhombic
29: "so", # simple orthorhombic
30: "so", # simple orthorhombic
31: "so", # simple orthorhombic
32: "so", # simple orthorhombic
33: "so", # simple orthorhombic
34: "so", # simple orthorhombic
35: "bso", # base centered orthorhombic
36: "bso", # base centered orthorhombic
37: "bso", # base centered orthorhombic
38: "bso", # base centered orthorhombic
39: "bso", # base centered orthorhombic
40: "bso", # base centered orthorhombic
41: "bso", # base centered orthorhombic
42: "fco", # face centered orthorhombic
43: "fco", # face centered orthorhombic
44: "bco", # body centered orthorhombic
45: "bco", # body centered orthorhombic
46: "bco", # body centered orthorhombic
47: "so", # simple orthorhombic
48: "so", # simple orthorhombic
49: "so", # simple orthorhombic
50: "so", # simple orthorhombic
51: "so", # simple orthorhombic
52: "so", # simple orthorhombic
53: "so", # simple orthorhombic
54: "so", # simple orthorhombic
55: "so", # simple orthorhombic
56: "so", # simple orthorhombic
57: "so", # simple orthorhombic
58: "so", # simple orthorhombic
59: "so", # simple orthorhombic
60: "so", # simple orthorhombic
61: "so", # simple orthorhombic
62: "so", # simple orthorhombic
63: "bso", # base centered orthorhombic
64: "bso", # base centered orthorhombic
65: "bso", # base centered orthorhombic
66: "bso", # base centered orthorhombic
67: "bso", # base centered orthorhombic
68: "bso", # base centered orthorhombic
69: "fco", # face centered orthorhombic
70: "fco", # face centered orthorhombic
71: "bco", # body centered orthorhombic
72: "bco", # body centered orthorhombic
73: "bco", # body centered orthorhombic
74: "bco", # body centered orthorhombic
75: "st", # simple tetragonal
76: "st", # simple tetragonal
77: "st", # simple tetragonal
78: "st", # simple tetragonal
79: "bct", # body centered tetragonal
80: "bct", # body centered tetragonal
81: "st", # simple tetragonal
82: "bct", # body centered tetragonal
83: "st", # simple tetragonal
84: "st", # simple tetragonal
85: "st", # simple tetragonal
86: "st", # simple tetragonal
87: "bct", # body centered tetragonal
88: "bct", # body centered tetragonal
89: "st", # simple tetragonal
90: "st", # simple tetragonal
91: "st", # simple tetragonal
92: "st", # simple tetragonal
93: "st", # simple tetragonal
94: "st", # simple tetragonal
95: "st", # simple tetragonal
96: "st", # simple tetragonal
97: "bct", # body centered tetragonal
98: "bct", # body centered tetragonal
99: "st", # simple tetragonal
100: "st", # simple tetragonal
101: "st", # simple tetragonal
102: "st", # simple tetragonal
103: "st", # simple tetragonal
104: "st", # simple tetragonal
105: "st", # simple tetragonal
106: "st", # simple tetragonal
107: "bct", # body centered tetragonal
108: "bct", # body centered tetragonal
109: "bct", # body centered tetragonal
110: "bct", # body centered tetragonal
111: "st", # simple tetragonal
112: "st", # simple tetragonal
113: "st", # simple tetragonal
114: "st", # simple tetragonal
115: "st", # simple tetragonal
116: "st", # simple tetragonal
117: "st", # simple tetragonal
118: "st", # simple tetragonal
119: "bct", # body centered tetragonal
120: "bct", # body centered tetragonal
121: "bct", # body centered tetragonal
122: "bct", # body centered tetragonal
123: "st", # simple tetragonal
124: "st", # simple tetragonal
125: "st", # simple tetragonal
126: "st", # simple tetragonal
127: "st", # simple tetragonal
128: "st", # simple tetragonal
129: "st", # simple tetragonal
130: "st", # simple tetragonal
131: "st", # simple tetragonal
132: "st", # simple tetragonal
133: "st", # simple tetragonal
134: "st", # simple tetragonal
135: "st", # simple tetragonal
136: "st", # simple tetragonal
137: "st", # simple tetragonal
138: "st", # simple tetragonal
139: "bct", # body centered tetragonal
140: "bct", # body centered tetragonal
141: "bct", # body centered tetragonal
142: "bct", # body centered tetragonal
143: "hcp", # hexagonal close packed
144: "hcp", # hexagonal close packed
145: "hcp", # hexagonal close packed
146: "rhb", # rhombohedral(trigonal)
147: "hcp", # hexagonal close packed
148: "rhb", # rhombohedral(trigonal)
149: "hcp", # hexagonal close packed
150: "hcp", # hexagonal close packed
151: "hcp", # hexagonal close packed
152: "hcp", # hexagonal close packed
153: "hcp", # hexagonal close packed
154: "hcp", # hexagonal close packed
155: "rhb", # rhombohedral(trigonal)
156: "hcp", # hexagonal close packed
157: "hcp", # hexagonal close packed
158: "hcp", # hexagonal close packed
159: "hcp", # hexagonal close packed
160: "rhb", # rhombohedral(trigonal)
161: "rhb", # rhombohedral(trigonal)
162: "hcp", # hexagonal close packed
163: "hcp", # hexagonal close packed
164: "hcp", # hexagonal close packed
165: "hcp", # hexagonal close packed
166: "rhb", # rhombohedral(trigonal)
167: "rhb", # rhombohedral(trigonal)
168: "hcp", # hexagonal close packed
169: "hcp", # hexagonal close packed
170: "hcp", # hexagonal close packed
171: "hcp", # hexagonal close packed
172: "hcp", # hexagonal close packed
173: "hcp", # hexagonal close packed
174: "hcp", # hexagonal close packed
175: "hcp", # hexagonal close packed
176: "hcp", # hexagonal close packed
177: "hcp", # hexagonal close packed
178: "hcp", # hexagonal close packed
179: "hcp", # hexagonal close packed
180: "hcp", # hexagonal close packed
181: "hcp", # hexagonal close packed
182: "hcp", # hexagonal close packed
183: "hcp", # hexagonal close packed
184: "hcp", # hexagonal close packed
185: "hcp", # hexagonal close packed
186: "hcp", # hexagonal close packed
187: "hcp", # hexagonal close packed
188: "hcp", # hexagonal close packed
189: "hcp", # hexagonal close packed
190: "hcp", # hexagonal close packed
191: "hcp", # hexagonal close packed
192: "hcp", # hexagonal close packed
193: "hcp", # hexagonal close packed
194: "hcp", # hexagonal close packed
195: "sc", # simple cubic
196: "fcc", # face centered cubic
197: "bcc", # body centered cubic
198: "sc", # simple cubic
199: "bcc", # body centered cubic
200: "sc", # simple cubic
201: "sc", # simple cubic
202: "fcc", # face centered cubic
203: "fcc", # face centered cubic
204: "bcc", # body centered cubic
205: "sc", # simple cubic
206: "bcc", # body centered cubic
207: "sc", # simple cubic
208: "sc", # simple cubic
209: "fcc", # face centered cubic
210: "fcc", # face centered cubic
211: "bcc", # body centered cubic
212: "sc", # simple cubic
213: "sc", # simple cubic
214: "bcc", # body centered cubic
215: "sc", # simple cubic
216: "fcc", # face centered cubic
217: "bcc", # body centered cubic
218: "sc", # simple cubic
219: "fcc", # face centered cubic
220: "bcc", # body centered cubic
221: "sc", # simple cubic
222: "sc", # simple cubic
223: "sc", # simple cubic
224: "sc", # simple cubic
225: "fcc", # face centered cubic
226: "fcc", # face centered cubic
227: "fcc", # face centered cubic
228: "fcc", # face centered cubic
229: "bcc", # body centered cubic
230: "bcc", # body centered cubic
}
@staticmethod
def getType(group):
if group in _BravaisKKR.dict:
return _BravaisKKR.dict[group]
else:
return "aux"
class _TranslationKKR:
matrix = {
"sc": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
"fcc": np.array([[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]]),
"bcc": np.array([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]]),
"hcp": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
"rhb": np.array([[2.0, 1.0, 1.0], [-1.0, 1.0, 1.0], [-1.0, -2.0, 1.0]])/3.0,
"st": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
"bct": np.array([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]]),
"so": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
"fco": np.array([[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]]),
"bco": np.array([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]]),
"bso": np.array([[0.5, -0.5, 0.0], [0.5, +0.5, 0.0], [0.0, 0.0, +1.0]]),
"sm": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
"bsm": np.array([[0.5, -0.5, 0.0], [0.5, +0.5, 0.0], [0.0, 0.0, +1.0]]),
"trc": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
"aux": np.array([[+1.0, 0.0, 0.0], [0.0, +1.0, 0.0], [0.0, 0.0, +1.0]]),
}
@staticmethod
def getMatrix(brvtyp):
if brvtyp in _TranslationKKR.matrix:
return _TranslationKKR.matrix[brvtyp]
else:
return _TranslationKKR.matrix["aux"]
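# Quick illustration (a sketch, not part of the original module): for fcc the
# rows of getMatrix("fcc") applied to the conventional cell give the primitive
# vectors (0, a/2, a/2), (a/2, 0, a/2), (a/2, a/2, 0) -- the half face diagonals.
def _example_fcc_primitive(a=1.0):
    conv = np.identity(3) * a
    return np.dot(_TranslationKKR.getMatrix("fcc"), conv)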
class _SiteKKR:
def __init__(self, site, wyckoff, coords, Vc: str = "Og"):
"""initialization
Args:
site ([type]): [description]
wyckoff ([type]): [description]
coords ([type]): [description]
Vc (str, optional): element treated as Z=0. Defaults to "Og".
"""
def fold(c):
c[0] = c[0] % 1.0
c[1] = c[1] % 1.0
c[2] = c[2] % 1.0
if c[0] > 1.0-1.0e-8:
c[0] = 0.0
if c[1] > 1.0-1.0e-8:
c[1] = 0.0
if c[2] > 1.0-1.0e-8:
c[2] = 0.0
return c
self.species = site.species
self.frac_coords = fold(coords)
conc_sum = 0.0 # sum of concentration.
for element in site.species.elements:
conc_sum += site.species.get_el_amt_dict()[element.symbol]
if conc_sum < 1.0:
self.name = site.species.reduced_formula + Vc + "_%s" % wyckoff
else:
self.name = site.species.reduced_formula + "_%s" % wyckoff
def findSameCoords(self, vsite):
def match_coords(ca, cb):
return abs(ca[0]-cb[0]) < 1.0e-8 \
and abs(ca[1]-cb[1]) < 1.0e-8 \
and abs(ca[2]-cb[2]) < 1.0e-8
found = False
for site in vsite:
if match_coords(self.frac_coords, site.frac_coords):
found = True
break
return found
def findSameName(self, vsite):
found = False
for site in vsite:
if self.name == site.name:
found = True
break
return found
def _get_spginfo_from_ciffile(filename):
"""extract space group data from a cif file
returns None the cif file lacks it.
Note:
_symmetry_space_group_name_H-M, _symmetry_Int_Tables_number, _symmetry_cell_setting are obsolete, but are used widely. Therefore, they can be read.
Args:
filename (str): cif filename
Returns:
str: _space_group_name_H-M_alt
int: _space_group_IT_number
str: _space_group_crystal_system
"""
with open(filename) as f:
data = f.read().splitlines()
name = None
number = None
cellsetting = None
for line in data:
s = line.split()
if len(s) > 1:
if s[0] == "_symmetry_space_group_name_H-M" or s[0] == "_space_group_name_H-M_alt":
name = " ".join(s[1:])
if s[0] == "_symmetry_Int_Tables_number" or s[0] == "_space_group_IT_number":
number = int(s[1])
if s[0] == "_symmetry_cell_setting" or s[0] == "_space_group_crystal_system":
cellsetting = s[1]
# debug print
if False:
print("ciffile: name, number, cellsetting", name, number, cellsetting)
return name, number, cellsetting
class StructureSpeciesConverter:
def __init__(self, structure):
# make unit species dictionary
species_dict = []
for site in structure.sites:
newitem = dict(site.species.as_dict())
if newitem not in species_dict:
species_dict.append(newitem)
# make conversion table
if True:
conv_table = {}
Z = 0
for content in species_dict:
Z += 1
if Z == 2 or Z == 10 or Z == 18 or Z == 36 or Z == 54 or Z == 86:
# skip noble gas
Z += 1
if Z > 103:
raise CIF2KKRTooManyTypesError("too many type definitions")
elm = Element("H").from_Z(Z)
elm_str = str(elm)
conv_table[elm_str] = content
else:
conv_table = {}
for Z, content in zip(range(1, 104), species_dict):
elm = Element("H").from_Z(Z)
elm_str = str(elm)
conv_table[elm_str] = content
self.conv_table = conv_table
# make a dummy structure
new_species = []
for site in structure.sites:
newitem = dict(site.species.as_dict())
keys = [k for k, v in conv_table.items() if v == newitem]
if len(keys) != 1:
print("strange keys", keys)
raise ValueError
new_species.append(keys[0])
# make fractional coordinates
frac_coords = []
for site in structure.sites:
frac_coords.append(site.frac_coords)
new_structure = Structure(
lattice=structure.lattice, species=new_species, coords=frac_coords)
self.substituted_structure = new_structure
@property
def structure(self):
return self.substituted_structure
def inverse_conversion(self, structure):
conv_table = self.conv_table
# make a dummy structure
new_species = []
for site in structure.sites:
newitem = dict(site.species.as_dict())
if len(newitem.keys()) != 1:
print("keys >1", newitem)
raise ValueError
key = list(newitem.keys())[0]
if newitem[key] != 1.0:
raise ValueError("value error in inverse_conversion")
new_species.append(conv_table[key])
# make fractional coordinates
frac_coords = []
for site in structure.sites:
frac_coords.append(site.frac_coords)
new_structure = Structure(
lattice=structure.lattice, species=new_species, coords=frac_coords)
return new_structure
def _show_equiv_matrix(structure, input_analyzer, wy):
"""obsolete"""
species = structure.species
ops = input_analyzer.get_space_group_operations()
n = len(structure.sites)
# equiv_matrix = np.full((n, n), False)
equiv_matrix = np.identity(n, dtype=bool)
for i1 in range(n):
for i2 in range(i1, n):
site1 = PeriodicSite(
species=species[i1], coords=structure.sites[i1].frac_coords, lattice=structure.lattice)
site2 = PeriodicSite(
species=species[i2], coords=structure.sites[i2].frac_coords, lattice=structure.lattice)
eq = ops.are_symmetrically_equivalent([site1], [site2])
equiv_matrix[i1, i2] = eq
for i1 in range(n):
for i2 in range(i1, n):
equiv_matrix[i2, i1] = equiv_matrix[i1, i2]
# make indeces and columns
namelist = []
for specie, wy in zip(species, wy):
namelist.append("{},{}".format(specie, wy))
df = pd.DataFrame(equiv_matrix, index=namelist, columns=namelist)
print(df)
uniq_name_wy_list = list(set(namelist))
print(uniq_name_wy_list)
uniq_name_wy_count = {}
for key in uniq_name_wy_list:
uniq_name_wy_count[key] = 0
# make submatrix step by step
nlist = list(range(len(namelist)))
for i in nlist:
flaglist = df.iloc[i, :].values
ilist = np.where(flaglist == True)[0]
dfs = df.iloc[ilist, ilist]
name_wy = dfs.columns[0]
s = name_wy.split(",")
if len(s) == 2:
print(dfs)
uniq_name_wy_count[name_wy] += 1
id_ = uniq_name_wy_count[name_wy]
new_name_wy = "{},{}".format(name_wy, id_)
for j in ilist:
namelist[j] = new_name_wy
df.index = namelist
df.columns = namelist
print(df)
elm_list = []
wy_list = []
id_list = []
for name in df.columns:
s = name.split(",")
elm_list.append(s[0])
wy_list.append(s[1])
id_list.append(s[2])
return elm_list, wy_list, id_list
def _get_uniq_wyckoff(structure):
analyzer = SpacegroupAnalyzer(structure)
wyckoffs = analyzer.get_symmetry_dataset()["wyckoffs"]
equiv_atoms = analyzer.get_symmetry_dataset()["equivalent_atoms"]
if len(wyckoffs) != len(equiv_atoms) or len(wyckoffs) != len(structure.sites):
print("len(wyckoffs)={}, ".format(len(wyckoffs)) +
"len(equiv_atoms)={}, ".format(len(equiv_atoms)) +
"len(structure.sites)={}".format(len(structure.sites)))
raise ValueError(
"len(wyckoffs)!=len(equiv_atoms), possibly !=len(structure.sites)")
wyckoffs_conv = []
for site, wy, eq in zip(structure.sites, wyckoffs, equiv_atoms):
mul = np.count_nonzero(equiv_atoms == eq)
wyckoffs_conv.append("{}{}_{}".format(mul, wy, str(eq)))
# debug print
if False:
print(wyckoffs_conv)
for site, wy in zip(structure.sites, wyckoffs_conv):
print("spceie, wy,eq", site.as_dict()["species"], wy)
return wyckoffs_conv
# obsolete algorithm
# Kino keeps it for future use
if True:
speciesconverter = StructureSpeciesConverter(structure)
substitutedstructure = speciesconverter.structure
else:
substitutedstructure = structure
print(substitutedstructure)
elm_list, wy_list, id_list = _show_equiv_matrix(
substitutedstructure, analyzer, wyckoffs)
print(elm_list, wy_list, id_list)
converted_structure = speciesconverter.inverse_conversion(
substitutedstructure)
print(converted_structure)
wyckoff_conv = []
for wy, id_ in zip(wy_list, id_list):
wyckoff_conv.append("{}_{}".format(wy, id_))
for site, wy in zip(converted_structure.sites, wyckoff_conv):
namedict = site.species.as_dict()
name = None
if len(namedict.keys()) == 1:
key = list(namedict.keys())[0]
if namedict[key] == 1.0:
name = key
if name is None:
name = str(site.species)
print(str(site.species), "_".join([name, wy]))
return wyckoff_conv
def _found_unknown_elements(structure):
elementkkr = ElementKKR(Vc=None)
elementkkr = list(elementkkr.dict.keys())
sites = structure.sites
for site in sites:
for elm in site.species.elements:
elm = str(elm)
if elm not in elementkkr:
print("unknown element", elm)
print("known elements are", elementkkr)
return True
return False
def _show_cell_parameters(structure_conv):
print("# conventional cell")
print(" a=%9.5f b=%9.5f c=%9.5f(a.u)" %
(structure_conv.lattice.a,
structure_conv.lattice.b,
structure_conv.lattice.c))
print(" alpha=%9.5f beta=%9.5f gamma=%9.5f(degree)" %
(structure_conv.lattice.alpha,
structure_conv.lattice.beta,
structure_conv.lattice.gamma))
def _show_lattice_parameters(lattice_constant, structure_conv, lattice_prim):
print("# lattice constant a %9.5f [angstrom]" % lattice_constant)
print("# conventional translation vectors (in units of a)")
print(" a=(%9.5f%9.5f%9.5f)" %
(structure_conv.lattice.matrix[0][0]/lattice_constant,
structure_conv.lattice.matrix[0][1]/lattice_constant,
structure_conv.lattice.matrix[0][2]/lattice_constant))
print(" b=(%9.5f%9.5f%9.5f)" %
(structure_conv.lattice.matrix[1][0]/lattice_constant,
structure_conv.lattice.matrix[1][1]/lattice_constant,
structure_conv.lattice.matrix[1][2]/lattice_constant))
print(" c=(%9.5f%9.5f%9.5f)" %
(structure_conv.lattice.matrix[2][0]/lattice_constant,
structure_conv.lattice.matrix[2][1]/lattice_constant,
structure_conv.lattice.matrix[2][2]/lattice_constant))
volume = np.dot(np.cross(structure_conv.lattice.matrix[0],
structure_conv.lattice.matrix[1]),
structure_conv.lattice.matrix[2])
print(" volume= %10.5f(a.u.)" % (volume/_kkr_bohr**3))
print("# primitive translation vectors (in units of a)")
print(" a=(%9.5f%9.5f%9.5f)" %
(lattice_prim[0][0]/lattice_constant,
lattice_prim[0][1]/lattice_constant,
lattice_prim[0][2]/lattice_constant))
print(" b=(%9.5f%9.5f%9.5f)" %
(lattice_prim[1][0]/lattice_constant,
lattice_prim[1][1]/lattice_constant,
lattice_prim[1][2]/lattice_constant))
print(" c=(%9.5f%9.5f%9.5f)" %
(lattice_prim[2][0]/lattice_constant,
lattice_prim[2][1]/lattice_constant,
lattice_prim[2][2]/lattice_constant))
volume = np.dot(
np.cross(lattice_prim[0], lattice_prim[1]), lattice_prim[2])
print(" volume= %10.5f(a.u.)" % (volume/_kkr_bohr**3))
def _show_atomic_position(sites_prim, lattice_prim, lattice_constant):
print("# atomic positions (in units of a) %d atoms" % len(sites_prim))
for site in sites_prim:
position = np.dot(site.frac_coords, lattice_prim)/lattice_constant
if abs(position[0]) < 1e-6:
position[0] = 0.0
if abs(position[1]) < 1e-6:
position[1] = 0.0
if abs(position[2]) < 1e-6:
position[2] = 0.0
print(" position=%13.8f%13.8f%13.8f type=%s" %
(position[0], position[1], position[2], site.name))
def ak_cif2kkrparam(filename: str, use_bravais: bool = True, use_primitive: bool = True,
cif_primitive: bool = True,
fmt: str = "cif", Vc: str = "Og",
show_detail: bool = False):
"""
check whether the cif space group number is the same as that of spglib
check whether the number of sites of the cif file is the same as that of this routine
If use_bravais is True, use_primitive is set to True
if fmt is "cif", CifParser is used. In the other fmt, Structure.from_file() is used.
Args:
filename (str): cif filename
use_bravais (bool, optional): use Bravais lattice. Defaults to True.
use_primitive (bool, optional): use primitive cell. Defaults to True.
cif_primitive (bool, optional): read the cif file as primitive cell. Defaults to True.
fmt (str, optional): filename format. Defaults to "cif".
show_detail (bool, optional): [description]. Defaults to False.
Raises:
CIF2KKRGetStructureError: failed to read structure via CifParser
CIF2KKRUnknownElementError: unknown element in the cif file
"""
if fmt == "cif":
parser = CifParser(filename)
try:
print("cif_primitive=", cif_primitive)
structure_work = parser.get_structures(primitive=cif_primitive)[0]
except ValueError:
raise CIF2KKRGetStructureError("failed in parser.get_structures.\n"
+ "please correct occupancies and coordinates.")
else:
try:
structure_work = Structure.from_file(filename)
except ValueError:
raise CIF2KKRGetStructureError("failed in Struture.from_file.\n"
+ "please check occupancies and coordinates.")
if _found_unknown_elements(structure_work):
raise CIF2KKRUnknownElementError("unknown element in the cif file")
analyzer = SpacegroupAnalyzer(structure_work)
# analyzer = SpacegroupAnalyzer(structure_work,symprec=0.001, angle_tolerance=0.5) # bad result
try:
structure_conv = analyzer.get_conventional_standard_structure()
except TypeError:
raise CIF2KKRGetConventionalStandardStructureError("failed in analyzer.get_conventional_standard_structure.\n"
+ "please correct occupancies and coordinates.")
param = {}
if use_bravais:
use_primitive = True
# setup primitive cell vectors.
if use_primitive:
if fmt == "cif":
_, cif_number, _ = _get_spginfo_from_ciffile(filename)
if False:
spginfo = structure_work.get_space_group_info()
number = spginfo[1]
else:
number = analyzer.get_space_group_number()
if fmt == "cif":
print("cif symmetry, spg lib symmetry", cif_number, number)
if cif_number != number:
print("WARNING: spg number in the cif file != spg number from spglib")
# raise CIF2KKRSpgDifferentError(
# "spg number in the cif file != spg number from spglib")
brvtyp = _BravaisKKR.getType(number)
if show_detail:
print("# space group")
print(" number=%d bravais=%s kkr_brvtyp=%s" % (
number,
analyzer.get_crystal_system(), brvtyp))
matrix = _TranslationKKR.getMatrix(brvtyp)
lattice_prim = np.dot(matrix, structure_conv.lattice.matrix)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Transform the gamestate data to onehot vectors
"""
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
import pandas as pd
import numpy as np
import re
import os
from pathlib import Path
# settlements and cities are built on node coordinates
# roads are built on edge coordinates
# nodes are named after an adjacent road
# hence the set of node coords is a subset of the edge coords
# Additionally, no nodes close to the sea are needed...
# these are inaccessible for building
# edge_coordinates contains the edge coordinates on which a player
# could actually built
# len(edge_coordinates) = 72
edge_coordinates = ['0x27','0x38','0x49','0x5a','0x6b','0x7c',
'0x26','0x48','0x6a','0x8c',
'0x25','0x36','0x47','0x58','0x69','0x7a','0x8b','0x9c',
'0x24','0x46','0x68','0x8a','0xac',
'0x23','0x34','0x45','0x56','0x67','0x78','0x89','0x9a','0xab','0xbc',
'0x22','0x44','0x66','0x88','0xaa','0xcc',
'0x32','0x43','0x54','0x65','0x76','0x87','0x98','0xa9','0xba','0xcb',
'0x42','0x64','0x86','0xa8','0xca',
'0x52','0x63','0x74','0x85','0x96','0xa7','0xb8', '0xc9',
'0x62','0x84','0xa6','0xc8',
'0x72','0x83','0x94','0xa5','0xb6','0xc7']
# additional node coordinates
# (that are not in the accessible edge_coordinates list)
# the ones on the right side of the land that are named after
# sea edge nodes
node_coordinates = ['0x8d', '0xad','0xcd','0xdc','0xda','0xd8']
# all the coordinates of the table that a player can build on
# plus the none value for when the player has not built
# len(build_coords) = 79
build_coords = edge_coordinates + node_coordinates + ['None']
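# quick sanity checks of the sizes quoted in the comments above
assert len(edge_coordinates) == 72
assert len(build_coords) == 79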
################################
# encoding the build coordinates
################################
np_build_coords = np.array(build_coords)
label_encoder = LabelEncoder()
integer_encoded_build_coords = label_encoder.fit_transform(np_build_coords)
#print(label_encoder.transform(np.array(['0x69'])))
######################
# for debugging use:
######################
#print('building coordinates label encoding')
#for x in build_coords:
# print('coordinate ' + str(x) + ' : '+str(label_encoder.transform(np.ravel(x))))
#print('-----------------------------------')
#building coordinates label encoding
#coordinate 0x27 : [5]
#coordinate 0x38 : [9]
#coordinate 0x49 : [17]
#coordinate 0x5a : [22]
#coordinate 0x6b : [32]
#coordinate 0x7c : [38]
#coordinate 0x26 : [4]
#coordinate 0x48 : [16]
#coordinate 0x6a : [31]
#coordinate 0x8c : [48]
#coordinate 0x25 : [3]
#coordinate 0x36 : [8]
#coordinate 0x47 : [15]
#coordinate 0x58 : [21]
#coordinate 0x69 : [30]
#coordinate 0x7a : [37]
#coordinate 0x8b : [47]
#coordinate 0x9c : [54]
#coordinate 0x24 : [2]
#coordinate 0x46 : [14]
#coordinate 0x68 : [29]
#coordinate 0x8a : [46]
#coordinate 0xac : [62]
#coordinate 0x23 : [1]
#coordinate 0x34 : [7]
#coordinate 0x45 : [13]
#coordinate 0x56 : [20]
#coordinate 0x67 : [28]
#coordinate 0x78 : [36]
#coordinate 0x89 : [45]
#coordinate 0x9a : [53]
#coordinate 0xab : [61]
#coordinate 0xbc : [67]
#coordinate 0x22 : [0]
#coordinate 0x44 : [12]
#coordinate 0x66 : [27]
#coordinate 0x88 : [44]
#coordinate 0xaa : [60]
#coordinate 0xcc : [73]
#coordinate 0x32 : [6]
#coordinate 0x43 : [11]
#coordinate 0x54 : [19]
#coordinate 0x65 : [26]
#coordinate 0x76 : [35]
#coordinate 0x87 : [43]
#coordinate 0x98 : [52]
#coordinate 0xa9 : [59]
#coordinate 0xba : [66]
#coordinate 0xcb : [72]
#coordinate 0x42 : [10]
#coordinate 0x64 : [25]
#coordinate 0x86 : [42]
#coordinate 0xa8 : [58]
#coordinate 0xca : [71]
#coordinate 0x52 : [18]
#coordinate 0x63 : [24]
#coordinate 0x74 : [34]
#coordinate 0x85 : [41]
#coordinate 0x96 : [51]
#coordinate 0xa7 : [57]
#coordinate 0xb8 : [65]
#coordinate 0xc9 : [70]
#coordinate 0x62 : [23]
#coordinate 0x84 : [40]
#coordinate 0xa6 : [56]
#coordinate 0xc8 : [69]
#coordinate 0x72 : [33]
#coordinate 0x83 : [39]
#coordinate 0x94 : [50]
#coordinate 0xa5 : [55]
#coordinate 0xb6 : [64]
#coordinate 0xc7 : [68]
#coordinate 0x8d : [49]
#coordinate 0xad : [63]
#coordinate 0xcd : [74]
#coordinate 0xdc : [77]
#coordinate 0xda : [76]
#coordinate 0xd8 : [75]
#coordinate None : [78]
# binary encode
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded_build_coords = integer_encoded_build_coords.reshape(len(integer_encoded_build_coords), 1)
onehot_encoded_build_coords = onehot_encoder.fit_transform(integer_encoded_build_coords)
#print(onehot_encoded_build_coords)
##############################################
# Testing
##############################################
# test label transform ['0x69' '0x89' 'None']
#print('Testing the build coordinates')
#y = gamestates.iloc[2,6:9]
#values = np.array(y)
#print(values)
#integer_encoded = label_encoder.transform(np.ravel(values))
#print(integer_encoded)
#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
#onehot_encoded = onehot_encoder.transform(integer_encoded)
#print(onehot_encoded)
#print('eotesting build coordinates')
# robber can be placed on land hexes (19)
land_coords = ['0x37','0x59','0x7b',
'0x35','0x57','0x79','0x9b',
'0x33','0x55','0x77','0x99','0xbb',
'0x53','0x75','0x97','0xb9',
'0x73','0x95','0xb7'
]
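# 19 robber-accessible land hexes, matching the one-hot width used below
assert len(land_coords) == 19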
################################
# encoding the land coordinates
# aka robber coordinates
################################
np_rob_coords = np.array(land_coords)
rob_label_encoder = LabelEncoder()
integer_encoded_rob_coords = rob_label_encoder.fit_transform(np_rob_coords)
# print(integer_encoded_rob_coords)
# [ 2 6 11 1 5 10 15 0 4 9 14 18 3 8 13 17 7 12 16]
# binary encode
rob_onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded_rob_coords = integer_encoded_rob_coords.reshape(len(integer_encoded_rob_coords), 1)
onehot_encoded_rob_coords = rob_onehot_encoder.fit_transform(integer_encoded_rob_coords)
#print(onehot_encoded_rob_coords)
##############################################
# Testing
##############################################
## test robber coordinates of pilot01
#print('Testing the robber ')
#y = gamestates.iloc[:,3]
#values = np.array(y)
#print(values)
#integer_encoded = rob_label_encoder.transform(np.ravel(values))
#print(integer_encoded)
#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
#onehot_encoded = rob_onehot_encoder.transform(integer_encoded)
#print(onehot_encoded)
#print('eotesting robber')
################################
# encoding the hex typed
################################
# this needs to have custom categories because of the ports
# in the game version of the data
# 6: water
# 0: desert
# 1: clay
# 2: ore
# 3: sheep
# 4: wheat
# 5: wood
# 7 - 12 : miscellaneous ports (3:1) facing the different directions
# 16+ : non-miscellaneous ports (2:1)
#
# 9 categories
def hexLabelEncoder(hextypes):
'''
converts the hextypes to labeled (9 labels for the 9 categories)
Parameters: hex board layout array
Returns: array that contains the labels
'''
y = []
# pilot1 hexlayout is
#[9, 6, 67, 6, 6, 2, 5, 1, 66, 8, 2, 3, 1, 2, 6, 6, 5, 3, 4, 1, 4, 11, 36, 5, 4, 0, 5, 6, 6, 4, 3, 3, 97, 21, 6, 12, 6]
for x in hextypes:
if x < 7 :
y.append(x)
elif 7<= x <= 12:
y.append(7)
else :
y.append(8)
return y
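# worked example covering the three branches above
assert hexLabelEncoder([2, 9, 16]) == [2, 7, 8]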
###### checking the general fit
###### generalized ohe encoder for list of all possible land types
hex_type_OHencoder = OneHotEncoder(sparse=False)
hex_type_labels = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
#integer_encoded_types = integer_encoded_types.reshape(len(integer_encoded_types),1)
OHE_land_types = hex_type_OHencoder.fit_transform(hex_type_labels.reshape(len(hex_type_labels),1))
#print(OHE_land_types)
################################################
# Testing
##############################################
## test land types of pilot01
#hextypes = gamestates.iloc[0,1]
#integer_encoded_types = np.array(hexLabelEncoder(hextypes))
#print(integer_encoded_types)
# outputs:
# pilot1 hexlayout is
#[9, 6, 67, 6, 6, 2, 5, 1, 66, 8, 2, 3, 1, 2, 6, 6, 5, 3, 4, 1, 4, 11, 36, 5, 4, 0, 5, 6, 6, 4, 3, 3, 97, 21, 6, 12, 6]
# converted to:
# [7 6 8 6 6 2 5 1 8 7 2 3 1 2 6 6 5 3 4 1 4 7 8 5 4 0 5 6 6 4 3 3 8 8 6 7 6]
#ohe_hex_layout = hex_type_OHencoder.transform(integer_encoded_types.reshape(len(integer_encoded_types),1))
######################################################
# create the numpy array that contains the ohe vectors
######################################################
#
# store the data to an np array so that they can be used
# in keras
#
# a massive np array will be created with all the games at the end, when we
# will be ready to train
# to convert to ohe you first transform to label encoding
# and then to one-hot encoding
# np array size :
# rows : 4150
# i.e. for all 57 games we have 4150 gameturns
# columns :
# hex layout : 37 hexes x 9 categories
# -> 333
# robber positions : 19 possible positions (land hexes)
# -> 19
# player state :
# builds : 24 building blocks x 79 categories(coords)
# -> 1896
# dev cards : 25 dev cards (true-false)
# -> 25
##
# total : 333 + 19 + 4*(1896+25) = 8036
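# the bookkeeping above, as a runnable check:
# 37*9 (layout) + 19 (robber) + 4 players * (24*79 builds + 25 dev cards)
assert 37*9 + 19 + 4*(24*79 + 25) == 8036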
######### IMPORTANT ##########
## Instead of a big, chaotic table, save to various small np arrays
##
#ohedata = np.zeros((4150,8036))
## saving pilot1 to np data array
## land hex types
#temp = np.array(hexLabelEncoder(gamestates.iloc[0,1]))
#print('-------')
#print(temp)
#print(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
#
##oned_temp = np.ravel(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
## this goes from 0 to 332
#ohedata[0,0:333] = np.ravel(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
#ohedata[0,0:3]=1 # -> writes 1 to columns 0,1,2
######## IMPORTANT ##########
# OHE conversion steps:
# 1. convert hex layout
# 2. convert robber position and append it
# 3. convert player1 build and append them
# 4. convert player1 devcard and append them
# 5. convert player2 3 4
# 6. check size of all this
def convert_hex_layout(hexlayout):
''' converts the gamestates hexlayout to one hot encoding
PARAMETERS
----------
hexlayout : the gamestates hexlayout
Returns
-------
a flat np array of size 333 (37 hexes x 9 categories)
'''
# convert the layout to label encoding
labeled = np.array(hexLabelEncoder(hexlayout))
# convert the layout to one hot encoding
ohe = hex_type_OHencoder.transform(labeled.reshape(len(labeled),1))
return np.ravel(ohe)
####Testing OK
#print('Testing hex layout conversion')
#methodlayout = convert_hex_layout(gamestates.iloc[0,1])
#scriptlayout = np.ravel(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
def convert_robber_position(robber):
''' converts the robber position coordinates to one hot encoding
Parameters
----------
robber: the robber coordinates from the gamestates dataframe
Returns
-------
encoded np array of size 19
'''
# convert the robber position to labeled encoding
robber = np.array(robber)
labeled = rob_label_encoder.transform(np.ravel(robber))
# convert the robber position to one hot encoding
labeled = labeled.reshape(len(labeled),1)
ohe = rob_onehot_encoder.transform(labeled)
# return with ravel to avoid the double list [[]]
return np.ravel(ohe)
####Testing OK
#print('Testing the robber ')
#y = gamestates.iloc[1,3]
#values = np.array(y)
#print(values)
#integer_encoded = rob_label_encoder.transform(np.ravel(values))
#print(integer_encoded)
#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
#onehot_encoded = rob_onehot_encoder.transform(integer_encoded)
#print(onehot_encoded)
#print('eotesting robber')
#print('Testing the robber method')
#methodrobber = convert_robber_position(gamestates.iloc[1,3])
#print(methodrobber)
def convert_player_buildings(playerbuildings):
'''
Converts the player buildings coordinates to one hot encoding
Parameters
----------
the player's columns of settlements, cities and roads from the gamestate:
a list of 24 coordinates
Returns
-------
np array of one hot encoding for all 24 building blocks of the player
size should be (24,79) (in one line vector 24*79 = 1896)
'''
# list of the buildings
buildings = []
for coord in playerbuildings:
ohe_coord = convert_building_coord(coord)
buildings.append(ohe_coord)
#print(buildings)
npbuildings = np.array(buildings)
return np.ravel(npbuildings)
def convert_building_coord(hexcoord):
'''
Convert a hex building coordinate to one hot encoding
Parameters
----------
a hex coordinate
Returns
-------
one hot encoding of the coordinate, an np array of size 79
'''
value = np.array(hexcoord)
# convert the coordinate to labeled encoding
labeled = label_encoder.transform(np.ravel(value))
# convert the coordinate to one hot encoding
labeled = labeled.reshape(len(labeled), 1)
ohe = onehot_encoder.transform(labeled)
return ohe
#######
## Testing the coordinate convertion OK
#print('Testing the coordinate convertion to ohe')
## testing only one coordinate
#coord = gamestates.iloc[2,6]
#print(coord)
#methodcoord = convert_building_coord(coord)
## testing group of coordinates OK
#coords = gamestates.iloc[2,6:9]
#print(coords)
#methodcoords = convert_player_buildings(coords)
#print(methodcoords)
#print(methodcoords.reshape(3,79))
def convert_player_devcards(dev_cards):
'''
Converts the gamestate fields of the players dev cards
from true/false to binary 1/0
Parameters
----------
dev_cards : the 25 dev cards potentialy available to the player
Returns
-------
np array of size 25 where true is 1 and false is 0
'''
binary_dev_cards =[]
for card in dev_cards:
# type is np.bool, don't use quotes
if card == True :
binary_dev_cards.append(1)
else:
binary_dev_cards.append(0)
return np.array(binary_dev_cards)
#### Testing player dev cards OK
#dev_cards = gamestates.loc[58, 'pl0knight1' : 'pl0vp5']
#dclist = convert_player_devcards(dev_cards)
#print(dclist)
##############################################################################
# OHE conversion
##############################################################################
# convert each dataframe to np arrays
# each game has 10 np arrays of the board, robber and player states in ohe data
datafiles = ["../soclogsData_NoResources/DataTables/pilot/pilot03_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot15_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot17_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot04_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot21_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot02_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot08_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot09_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot14_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot11_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot05_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot16_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot01_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot20_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot13_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot10_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot12_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot07_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot06_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/league4_attempt2-2012-11-14-19-46-22-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/practice-2012-10-30-18-41-07-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League4-2012-11-24-09-17-47-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Test-2012-10-16-14-53-15-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/L5 Real game-2012-11-11-19-58-55-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Master League final-2012-12-05-16-59-57-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League 8 Game 2-2012-11-26-18-55-31-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/SOCL League 5 Game 2-2012-11-25-17-25-09-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/league 5 last game-2012-12-09-21-08-39-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/SOCL League 5 Game 4-2012-12-03-02-11-10-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League8-2012-11-24-12-04-51-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League3Game5-2012-11-30-19-59-18-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/master league 4-2012-12-04-17-37-56-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Master league game 2-2012-11-13-18-07-14-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Game 3-2012-11-25-20-09-16-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League 5 game 3-2012-11-26-00-51-20-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League4-2012-11-09-19-08-53-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/3version2-2012-11-21-20-23-31-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League3Game1-2012-11-18-20-34-38-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League3Game4-2012-11-28-20-01-30-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/L5 practicegame-2012-11-11-19-26-36-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Master League Game 3-2012-11-17-17-01-18-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Settles league 1-2012-11-08-18-05-34-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3practice-2012-05-31-19-23-46-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League2.4-2012-06-26-22-47-04-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3-2012-05-27-19-53-48-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league2.2-2012-06-18-20-50-12-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3 michael-2012-06-17-20-54-03-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/3-2012-06-06-19-58-56-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1-2012-06-17-19-53-24-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1.2-2012-06-21-20-27-05-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league 3 (-k)-2012-06-25-18-22-53-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3minus1-2012-05-25-22-22-21-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 2-2012-06-26-20-23-20-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1 game-2012-06-19-18-49-00-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1.1-2012-06-21-18-58-22-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league1 31may-2012-05-31-19-59-37-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 3 Finale-2012-06-25-21-57-53-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League2-2012-06-17-19-58-07-+0100_gamestates.pkl"
]
# make directories to save the results
ohedata_dir = Path.cwd() / "OHEdata/season2"
ohedata_dir.mkdir(parents=True, exist_ok=True)
ohedata_dir = Path.cwd() / "OHEdata/season1"
ohedata_dir.mkdir(parents=True, exist_ok=True)
ohedata_dir = Path.cwd() / "OHEdata/pilot"
ohedata_dir.mkdir(parents=True, exist_ok=True)
print('Converting gamestates data to ohe-hot encoded vectors')
print('This might take a while. Please be patient...')
for file in datafiles:
# create a dir with the game name
# to save the 10 np arrays of the game
# with the data in ohe
filename_parts = re.split(r'[/]', file)
season = filename_parts[3]
dest = "./OHEdata/"+season
gamename = filename_parts[4][:-15] #exclude the _gamestates.pkl part :-)
path = dest+"/"+gamename
try:
os.mkdir(path)
except OSError :
print("Creation of the directory %s failed" %path)
gamestates = pd.read_pickle(file)
# replace None values with 'None' to work with np
gamestates.replace(to_replace=[None], value='None', inplace=True)
# initialize nptables
nplayout = np.zeros((1,333))
nprobber = np.zeros((len(gamestates.index),19))
np_pl0_builds = np.zeros((len(gamestates.index),1896))
np_pl0_devcards = np.zeros((len(gamestates.index),25))
np_pl1_builds = np.zeros((len(gamestates.index),1896))
np_pl1_devcards = np.zeros((len(gamestates.index),25))
np_pl2_builds = np.zeros((len(gamestates.index),1896))
np_pl2_devcards = np.zeros((len(gamestates.index),25))
np_pl3_builds = np.zeros((len(gamestates.index),1896))
np_pl3_devcards = np.zeros((len(gamestates.index),25))
# convert the hex layout, column 1 is hexlayout
# hex layout does not change during a game, hence it is saved only once
# to view it nplayout.reshape(37,9) tested OK
nplayout[:] = convert_hex_layout(gamestates.iloc[0,1])
# for every row of the df, i.e. every game turn
for turn in range(len(gamestates.index)):
# convert the robber position, column 3 is robber
# tested OK
ohe_robber = convert_robber_position(gamestates.iloc[turn,3])
nprobber[turn,:] = ohe_robber
# convert player 0 building coordinates
# (note the None is also a category in the ohe encoding)
#print(gamestates.loc[turn,'pl0setm1':'pl0road15'])
# ohe_pl0_builds is a np.array of size (1896,)
ohe_pl0_builds = convert_player_buildings(gamestates.loc[turn,'pl0setm1':'pl0road15'])
#print(ohe_pl0_builds)
np_pl0_builds[turn,:] = ohe_pl0_builds
#print(np_pl0_builds[turn,:])
# convert player 0 dev cards
np_pl0_devcards[turn,:] = convert_player_devcards(gamestates.loc[turn,'pl0knight1' : 'pl0vp5'])
# convert player 1 building coordinates
ohe_pl1_builds = convert_player_buildings(gamestates.loc[turn,'pl1setm1':'pl1road15'])
np_pl1_builds[turn,:] = ohe_pl1_builds
# convert player 1 dev cards
np_pl1_devcards[turn,:] = convert_player_devcards(gamestates.loc[turn,'pl1knight1' : 'pl1vp5'])
# convert player 2 building coordinates
ohe_pl2_builds = convert_player_buildings(gamestates.loc[turn,'pl2setm1':'pl2road15'])
np_pl2_builds[turn,:] = ohe_pl2_builds
# convert player 2 dev cards
np_pl2_devcards[turn,:] = convert_player_devcards(gamestates.loc[turn,'pl2knight1' : 'pl2vp5'])
# convert player 3 building coordinates
ohe_pl3_builds = convert_player_buildings(gamestates.loc[turn,'pl3setm1':'pl3road15'])
np_pl3_builds[turn,:] = ohe_pl3_builds
# convert player 3 dev cards
np_pl3_devcards[turn,:] = convert_player_devcards(gamestates.loc[turn,'pl3knight1' : 'pl3vp5'])
# save the np arrays of the game
np.save(path+"/"+'layout.npy',nplayout)
np.save(path+"/"+'robber.npy',nprobber)
np.save(path+"/"+'pl0builds.npy',np_pl0_builds)
np.save(path+"/"+'pl0devcards.npy',np_pl0_devcards)
""" Athena binary file reader
At the moment, this reader assumes the conserved fields are in the
files. The Athena custom Grid has methods to do vx = M1 / d, etc.
"""
from __future__ import print_function
import os
import re
import numpy as np
from viscid import glob2
from viscid.readers import vfile
from viscid.readers.vfile_bucket import ContainerFile
from viscid.readers import athena
from viscid import coordinate
class AthenaBinFile(athena.AthenaFile, ContainerFile): # pylint: disable=abstract-method
"""An Athena binary file reader"""
_detector = r"^\s*(.*)\.([0-9]+)\.(bin)\s*$"
_def_fld_center = "Cell"
_collection = None
_fwrapper = None
float_type_name = None
var_type = None
_crds = None
def __init__(self, fname, crds=None, float_type_name=None, var_type=None,
**kwargs):
"""
Keyword Arguments:
float_type_name (str): should be 'f4' or 'f8' if you know
the data type of the file's data.
var_type (str): either 'cons' or 'prim'
"""
# there is no parent bucket, so we need to new one up for children
self.float_type_name = float_type_name
self.var_type = var_type
self._crds = crds
super(AthenaBinFile, self).__init__(fname, **kwargs)
@classmethod
def group_fnames(cls, fnames):
return athena.group_athena_files_common(cls._detector, fnames)
@classmethod
def collective_name_from_group(cls, fnames):
return athena.athena_collective_name_from_group(cls._detector,
fnames)
def get_file_wrapper(self, filename):
if self._fwrapper is None:
self._fwrapper = AthenaBinFileWrapper(filename,
float_type_name=self.float_type_name,
var_type=self.var_type)
else:
assert (self._fwrapper.filename == filename or
glob2(self._fwrapper.filename) == glob2(filename))
return self._fwrapper
def set_file_wrapper(self, wrapper):
raise NotImplementedError("This must be done at file init")
def load(self, fname):
if isinstance(fname, list):
self._collection = fname
else:
self._collection = [fname]
fname0 = self._collection[0]
fname1 = self.collective_name(fname)
basename = os.path.basename(fname0)
self.set_info('run', re.match(self._detector, basename).group(1))
super(AthenaBinFile, self).load(fname1)
def _parse(self):
if self._crds is None:
self._crds = self._make_crds(self._collection[0])
if len(self._collection) == 1:
# load a single file
_grid = self._parse_file(self.fname, self)
self.add(_grid)
self.activate(0)
else:
# load each file, and add it to the bucket
data_temporal = self._make_dataset(self, dset_type="temporal",
name="AthenaTemporalCollection")
for fname in self._collection:
f = self._load_child_file(fname, index_handle=False,
file_type=type(self),
crds=self._crds,
float_type_name=self.float_type_name,
var_type=self.var_type)
data_temporal.add(f)
data_temporal.activate(0)
self.add(data_temporal)
self.activate(0)
def _parse_file(self, filename, parent_node):
# we do minimal file parsing here for performance. we just
# make data wrappers from the templates we got from the first
# file in the group, and package them up into grids
# find the time from the first field's meta data
_file_wrapper = self.get_file_wrapper(filename)
_file_wrapper.read_header()
time = _file_wrapper.time
_grid = self._make_grid(parent_node, name="<AthenaGrid>")
self.time = time
_grid.time = time
_grid.set_crds(self._crds)
# make a DataWrapper and a Field for each template that we
# have from the first file that we parsed, then add it to
# the _grid
data_wrapper = AthenaBinDataWrapper
for i, fld_name in enumerate(_file_wrapper.fld_names):
if self._def_fld_center.lower() == "cell":
shape = self._crds.shape_cc
else:
shape = self._crds.shape_nc
data = data_wrapper(_file_wrapper, fld_name,
shape[::-1], i)
fld = self._make_field(_grid, "Scalar", fld_name,
self._crds, data, time=time,
center=self._def_fld_center,
zyx_native=True)
_grid.add_field(fld)
return _grid
def _make_crds(self, filename):
fw = AthenaBinFileWrapper(filename, keep_crd_clist=True,
float_type_name=self.float_type_name,
var_type=self.var_type)
with fw as f:
crd_clist = f.crd_clist
new_clist = []
dxmin = np.inf
for c in crd_clist:
if len(c[1]) > 1:
dxmin = np.min([dxmin, np.min(c[1][1:] - c[1][:-1])])
for i, cli in enumerate(crd_clist):
cc = cli[1]
try:
hd = 0.5 * (cc[1:] - cc[:-1])
nc = np.hstack([cc[0] - hd[0],
cc[:-1] + hd,
cc[-1] + hd[-1]])
except IndexError:
dxminh = 0.5 * dxmin
nc = np.array([cc[0] - dxminh, cc[0] + dxminh])
new_clist.append([crd_clist[i][0], nc])
crds = coordinate.wrap_crds("nonuniform_cartesian", new_clist[::-1])
return crds
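# Hedged usage sketch for the wrapper class below (the file name is an
# assumption for illustration; nothing is opened until the function is called):
def _demo_read_first_field(fname="run.0000.bin"):
    fw = AthenaBinFileWrapper(fname, float_type_name='f4')
    with fw as f:
        # entering the context manager reads the header, so field 0 can be read directly
        return f.read_field(0)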
class AthenaBinFileWrapper(object):
"""A File-like object for interfacing with Athena binary files
Attributes:
float_type_name (str): default float data type, should be
'f4' or 'f8'; defaults to double ('f8')
"""
# translation is purely for convenience
float_type_name = "f8"
var_type = "cons"
_file = None
_loc_after_header = None
_endian = None
_float_dtype = None # = np.dtype(_endian + float_type_name)
filename = None
keep_crd_clist = None
fld_names = None
nvars = None
nscalars = None
shape = None
count = None
_file_meta = None
time = None
dt = None
crd_clist = None
def __init__(self, filename, keep_crd_clist=False, float_type_name=None,
var_type=None):
self.filename = filename
self.keep_crd_clist = keep_crd_clist
if float_type_name is not None:
self.float_type_name = float_type_name
if var_type is not None:
self.var_type = var_type
def __del__(self):
self.close()
@property
def float_dtype(self):
if self._float_dtype is None:
with self as _:
# just opening the file makes it read the meta data
pass
return self._float_dtype
    @property
    def field_names(self):
        if self.fld_names is None:
            with self as _:
                # just opening the file makes it read the meta data
                pass
        return self.fld_names
def read_field(self, fld_id):
"""Read a field given a seekable location
Parameters:
fld_id(int): number of field in file
Returns:
ndarray: the field data as a flat array in native byte order
"""
if fld_id >= self.nvars:
raise IndexError("File {0} only has {1} fields, you asked for "
"fld number {2}".format(self.filename,
self.nvars, fld_id))
fld_size_bytes = self.count * self._float_dtype.itemsize
self._file.seek(self._loc_after_header + fld_id * fld_size_bytes)
data = np.fromfile(self._file, dtype=self._float_dtype,
count=self.count)
# return ndarray as native endian
return data.astype(self._float_dtype.name)
def read_header(self):
if self._endian is None:
with self as _:
# just opening the file makes it read the header
pass
def open(self):
if self._file is None:
self._file = open(self.filename, 'rb')
try:
if self._endian is None:
self._read_file_header()
except IOError as e:
self.close()
raise e
@property
def isopen(self):
return self._file is not None
def close(self):
if self._file is not None:
f = self._file
self._file = None
f.close()
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, value, traceback):
self.close()
def _read_file_header(self):
"""load up the file's meta data"""
self._file.seek(0, 0)
coordsys = np.fromfile(self._file, dtype="<i", count=1)[0]
dims = np.fromfile(self._file, dtype="<i", count=5)
# if nvar makes sense, we were right, use little endian
if dims[3] < 1000:
self._endian = "<"
else:
self._endian = ">"
coordsys = coordsys.byteswap()
dims = dims.byteswap()
nx, ny, nz = dims[:3]
nvars, nscalars = dims[3:5] # pylint: disable=unused-variable
dtyp_int = np.dtype(self._endian + "i4") # 32bit int
self._float_dtype = np.dtype(self._endian + self.float_type_name)
# ignore self_gravity and particles flags for now
_, _ = np.fromfile(self._file, dtype=dtyp_int, count=2)
# ignore gamm1 and cs for now
_, _ = np.fromfile(self._file, dtype=self._float_dtype, count=2)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Name: <NAME>
# Date: October 11, 2019
# Email: <EMAIL>
# Description: Implementation of the continuous rate RNN model
import os, sys
import numpy as np
import tensorflow as tf
import scipy.io
import pickle
np.random.seed(700)
# import horovod.tensorflow as hvd
# from hpc4neuro.errors import MpiInitError
# from hpc4neuro.distribution import DataDistributor
# import mpi4py
# Import utility functions
from utils import set_gpu
from utils import restricted_float
from utils import str2bool
'''
CONTINUOUS FIRING-RATE RNN CLASS
'''
class FR_RNN_dale:
"""
Firing-rate RNN model for excitatory and inhibitory neurons
Initialization of the firing-rate model with recurrent connections
"""
def __init__(self, N, P_inh, P_rec, w_in, som_N, w_dist, gain, apply_dale, w_out):
"""
Network initialization method
N: number of units (neurons)
P_inh: probability of a neuron being inhibitory
P_rec: recurrent connection probability
w_in: input weight matrix (N x n_inputs) for the stimuli
som_N: number of SOM neurons (set to 0 for no SOM neurons)
w_dist: recurrent weight distribution ('gaus' or 'gamma')
gain: scaling factor applied to the initial recurrent weights
apply_dale: apply Dale's principle ('True' or 'False')
w_out: readout weight matrix (n_outputs x N)
Based on the probability (P_inh) provided above,
the units in the network are classified into
either excitatory or inhibitory. Next, the
weight matrix is initialized based on the connectivity
probability (P_rec) provided above.
"""
self.N = N
self.P_inh = P_inh
self.P_rec = P_rec
self.w_in = w_in
self.som_N = som_N
self.w_dist = w_dist
self.gain = gain
self.apply_dale = apply_dale
self.w_out = w_out
# Assign each unit as excitatory or inhibitory
inh, exc, NI, NE, som_inh = self.assign_exc_inh()
self.inh = inh
self.som_inh = som_inh
self.exc = exc
self.NI = NI
self.NE = NE
# Initialize the weight matrix
self.W, self.mask, self.som_mask = self.initialize_W()
def assign_exc_inh(self):
"""
Method to randomly assign units as excitatory or inhibitory (Dale's principle)
Returns
inh: bool array marking which units are inhibitory
exc: bool array marking which units are excitatory
NI: number of inhibitory units
NE: number of excitatory units
som_inh: indices of "inh" for SOM neurons
"""
# Apply Dale's principle
if self.apply_dale == True:
inh = np.random.rand(self.N, 1) < self.P_inh
exc = ~inh
NI = len(np.where(inh == True)[0])
NE = self.N - NI
# Do NOT apply Dale's principle
else:
inh = np.random.rand(self.N, 1) < 0 # no separate inhibitory units
exc = ~inh
NI = len(np.where(inh == True)[0])
NE = self.N - NI
if self.som_N > 0:
som_inh = np.where(inh==True)[0][:self.som_N]
else:
som_inh = 0
return inh, exc, NI, NE, som_inh
def initialize_W(self):
"""
Method to generate and initialize the connectivity weight matrix, W
The weights are drawn from either gaussian or gamma distribution.
Returns
w: NxN weights (all positive)
mask: NxN matrix of 1's (excitatory units)
and -1's (for inhibitory units)
NOTE: To compute the "full" weight matrix, simply
multiply w and mask (i.e. w*mask)
"""
# Weight matrix
w = np.zeros((self.N, self.N), dtype = np.float32)
idx = np.where(np.random.rand(self.N, self.N) < self.P_rec)
if self.w_dist.lower() == 'gamma':
w[idx[0], idx[1]] = np.random.gamma(2, 0.003, len(idx[0]))
elif self.w_dist.lower() == 'gaus':
w[idx[0], idx[1]] = np.random.normal(0, 1.0, len(idx[0]))
w = w/np.sqrt(self.N*self.P_rec)*self.gain # scale by a gain to make it chaotic
if self.apply_dale == True:
w = np.abs(w)
# Mask matrix
mask = np.eye(self.N, dtype=np.float32)
mask[np.where(self.inh==True)[0], np.where(self.inh==True)[0]] = -1
# SOM mask matrix
som_mask = np.ones((self.N, self.N), dtype=np.float32)
if self.som_N > 0:
for i in self.som_inh:
som_mask[i, np.where(self.inh==True)[0]] = 0
return w, mask, som_mask
def load_net(self, model_dir):
"""
Method to load pre-configured network settings
"""
settings = scipy.io.loadmat(model_dir)
self.N = settings['N'][0][0]
self.som_N = settings['som_N'][0][0]
self.inh = settings['inh']
self.exc = settings['exc']
self.inh = self.inh == 1
self.exc = self.exc == 1
self.NI = len(np.where(settings['inh'] == True)[0])
self.NE = len(np.where(settings['exc'] == True)[0])
self.mask = settings['m']
self.som_mask = settings['som_m']
self.W = settings['w']
self.w_in = settings['w_in']
self.b_out = settings['b_out']
self.w_out = settings['w_out']
return self
def display(self):
"""
Method to print the network setup
"""
print('Network Settings')
print('====================================')
print('Number of Units: ', self.N)
print('\t Number of Excitatory Units: ', self.NE)
print('\t Number of Inhibitory Units: ', self.NI)
print('Weight Matrix, W')
full_w = self.W*self.mask
zero_w = len(np.where(full_w == 0)[0])
pos_w = len(np.where(full_w > 0)[0])
neg_w = len(np.where(full_w < 0)[0])
print('\t Zero Weights: %2.2f %%' % (zero_w/(self.N*self.N)*100))
print('\t Positive Weights: %2.2f %%' % (pos_w/(self.N*self.N)*100))
print('\t Negative Weights: %2.2f %%' % (neg_w/(self.N*self.N)*100))
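# Hedged usage sketch (the hyper-parameters below are illustrative
# assumptions, not taken from any particular experiment): build a small
# Dale-constrained network and print its summary.
def _demo_fr_rnn():
    N, n_in, n_out = 100, 3, 3
    w_in = np.float32(np.random.randn(N, n_in))
    w_out = np.float32(np.random.randn(n_out, N)/100)
    net = FR_RNN_dale(N, P_inh=0.2, P_rec=0.2, w_in=w_in, som_N=0,
                      w_dist='gaus', gain=1.5, apply_dale=True, w_out=w_out)
    net.display()
    return net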
def generate_flip_flop_trial(settings):
bits = settings['bits']
batch = settings['batches']
trial_info = {'neural_input': np.zeros([settings['T'],batch, bits]),
'desired_output':np.zeros([settings['T'],batch, bits])}
unsigned_inp = settings['rng'].binomial(1,0.2,[settings['T']//10,batch, bits])
unsigned_out = 2*settings['rng'].binomial(1,0.5,[settings['T']//10,batch, bits]) -1
inputs = np.multiply(unsigned_inp, unsigned_out)
inputs[0,:] = 1.0
inputs = np.repeat(inputs,10,axis=0)
trial_info['neural_input'] = inputs.T
# trial_info['neural_input'] = 0.5*trial_info['neural_input']
output = np.zeros_like(inputs)
for trial_idx in range(batch):
for bit_idx in range(bits):
input_ = np.squeeze(inputs[:,trial_idx,bit_idx])
t_flip = np.where(input_ != 0)
for flip_idx in range(np.size(t_flip)):
# Get the time of the next flip
t_flip_i = t_flip[0][flip_idx]
'''Set the output to the sign of the flip for the
remainder of the trial. Future flips will overwrite future
output'''
output[t_flip_i:,trial_idx, bit_idx] = \
inputs[t_flip_i,trial_idx, bit_idx]
trial_info['desired_output'] = output
# trial_info['desired_output'] = 0.5*trial_info['desired_output']
return trial_info
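# Hedged usage sketch for generate_flip_flop_trial (the settings values below
# are assumptions for illustration; only the keys the function reads are set).
def _demo_flip_flop_trial():
    demo_settings = {'bits': 3, 'batches': 8, 'T': 100,
                     'rng': np.random.RandomState(0)}
    trial = generate_flip_flop_trial(demo_settings)
    # neural_input is transposed to [bits, batches, T];
    # desired_output keeps the [T, batches, bits] layout.
    assert trial['neural_input'].shape == (3, 8, 100)
    assert trial['desired_output'].shape == (100, 8, 3)
    return trial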
'''
CONSTRUCT TF GRAPH FOR TRAINING
'''
def cell_rate(inputs, states):
# if fr_rnn.apply_dale == True:
# # Parametrize the weight matrix to enforce exc/inh synaptic currents
def restore(path):
file = open(path,'rb')
restore_data = file.read()
file.close()
# print(type(pickle.loads(restore_data)))
# print((self.__dict__))
hid= pickle.loads(restore_data,encoding='latin1')
return(hid)
data = restore(os.getcwd()+'/flip.pkl')
x = tf.transpose(states)
r = tf.sigmoid(x)
taus = data['taus']
w = tf.nn.relu(data['w'])
w_in = data['w_in']
# next_x is [N x 1]
ww = tf.matmul(w, data['m'])
ww = tf.multiply(ww, data['som_m'])
# Pass the synaptic time constants thru the sigmoid function
if len(taus) > 1:
taus_sig = tf.sigmoid(data['taus_gaus'])*(taus[1] - taus[0]) + taus[0]  # assumes 'taus_gaus' was saved in flip.pkl
elif len(taus) == 1: # one scalar synaptic decay time-constant
taus_sig = taus[0]
a = tf.multiply((1 - 1/taus_sig), x)
b = tf.multiply((1/taus_sig), ((tf.matmul(ww, r))\
+ tf.matmul(w_in, tf.transpose(inputs[:,:]))))
next_x = a + b #+ tf.random_normal(tf.shape(b))/10
# x.append(next_x)
return tf.transpose(next_x), tf.transpose(next_x)
def find_fps(settings):
def restore(path):
file = open(path,'rb')
restore_data = file.read()
file.close()
# print(type(pickle.loads(restore_data)))
# print((self.__dict__))
hid= pickle.loads(restore_data,encoding='latin1')
return(hid)
data = restore(os.getcwd()+'/flip.pkl')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
x = data['x']
n_bits = settings['bits']
inputs = np.zeros([1,n_bits])
fps = FixedPointSearch(
'rate',
np.array(x),
os.getcwd(),
cell = cell_rate,
sess = sess)
fps.sample_states(1000,np.array(x),'rate',1.5)
unique, all_fps = fps.find_fixed_points(inputs, save= True)
return unique, all_fps
def construct_tf(fr_rnn, settings, training_params):
"""
Method to construct a TF graph and return nodes with
Dale's principle
INPUT
fr_rnn: firing-rate RNN class
settings: dict containing the following keys
T: duration of a single trial (in steps)
stim_on: stimulus starting time (in steps)
stim_dur: stimulus duration (in steps)
delay: delay b/w two stimuli (in steps)
taus: time-constants (in steps)
DeltaT: sampling rate
training_params: dictionary containing training parameters
learning_rate: learning rate
OUTPUT
TF graph
"""
# Task params
T = settings['T']
taus = settings['taus']
DeltaT = settings['DeltaT']
task = settings['task']
# Training params
learning_rate = training_params['learning_rate']
# Excitatory units
exc_idx_tf = tf.constant(np.where(fr_rnn.exc == True)[0], name='exc_idx')
# Inhibitory units
inh_idx_tf = tf.constant(np.where(fr_rnn.inh == True)[0], name='inh_idx')
som_inh_idx_tf = tf.constant(fr_rnn.som_inh, name='som_inh_idx')
if task == 'flip':
stim = tf.placeholder(tf.float32, [settings['bits'],settings['batches'], T], name='u')
# Target node
z = tf.placeholder(tf.float32, [T,settings['batches'],settings['bits']], name='target')
# Initialize the decay synaptic time-constants (gaussian random).
# This vector will go through the sigmoid transfer function.
if len(taus) > 1:
taus_gaus = tf.Variable(tf.random_normal([fr_rnn.N, 1]), dtype=tf.float32,
name='taus_gaus', trainable=True)
elif len(taus) == 1:
taus_gaus = tf.Variable(tf.random_normal([fr_rnn.N, 1]), dtype=tf.float32,
name='taus_gaus', trainable=False)
print('Synaptic decay time-constants will not get updated!')
# Synaptic currents and firing-rates
x = [] # synaptic currents
r = [] # firing-rates
x_t = []
x.append(tf.random_normal([fr_rnn.N,settings['batches']], dtype=tf.float32)/100)
x_t.append(tf.transpose(x[-1]))
# Transfer function options
if training_params['activation'] == 'sigmoid':
r.append(tf.sigmoid(x[0]))
elif training_params['activation'] == 'clipped_relu':
# r.append(tf.clip_by_value(tf.nn.relu(x[0]), 0, 20))
r.append(tf.nn.relu(x[0]))
elif training_params['activation'] == 'softplus':
r.append(tf.clip_by_value(tf.nn.softplus(x[0]), 0, 20))
# Initialize recurrent weight matrix, mask, input & output weight matrices
w = tf.get_variable('w', initializer = fr_rnn.W, dtype=tf.float32, trainable=True)
m = tf.get_variable('m', initializer = fr_rnn.mask, dtype=tf.float32, trainable=False)
som_m = tf.get_variable('som_m', initializer = fr_rnn.som_mask, dtype=tf.float32,
trainable=False)
w_in = tf.get_variable('w_in', initializer = fr_rnn.w_in, dtype=tf.float32, trainable=True)
w_out = tf.get_variable('w_out', initializer = fr_rnn.w_out, dtype=tf.float32,
trainable=True)
b_out = tf.Variable(0, dtype=tf.float32, name='b_out', trainable=True)
# Forward pass
o = [] # output (i.e. weighted linear sum of rates, r)
for t in range(1, T):
if fr_rnn.apply_dale == True:
# Parametrize the weight matrix to enforce exc/inh synaptic currents
w = tf.nn.relu(w)
# next_x is [N x 1]
ww = tf.matmul(w, m)
ww = tf.multiply(ww, som_m)
# Pass the synaptic time constants thru the sigmoid function
if len(taus) > 1:
taus_sig = tf.sigmoid(taus_gaus)*(taus[1] - taus[0]) + taus[0]
elif len(taus) == 1: # one scalar synaptic decay time-constant
taus_sig = taus[0]
next_x = tf.multiply((1 - DeltaT/taus_sig), x[t-1]) + \
tf.multiply((DeltaT/taus_sig), ((tf.matmul(ww, r[t-1]))\
+ tf.matmul(w_in, tf.squeeze(stim[:,:, t-1])))) #+\
# tf.random_normal(tf.shape(x[t-1]), dtype=tf.float32)/10
x.append(next_x)
x_t.append(tf.transpose(next_x))
if training_params['activation'] == 'sigmoid':
r.append(tf.sigmoid(next_x))
elif training_params['activation'] == 'clipped_relu':
# r.append(tf.clip_by_value(tf.nn.relu(next_x), 0, 20))
r.append(tf.nn.relu(next_x))
elif training_params['activation'] == 'softplus':
r.append(tf.clip_by_value(tf.nn.softplus(next_x), 0, 20))
next_o = tf.matmul(w_out, r[t]) + b_out
o.append(tf.transpose(tf.squeeze(next_o)))
return stim, z, x_t, r, o, w, w_in, m, som_m, w_out, b_out, taus_gaus
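# Hedged usage sketch for construct_tf (the hyper-parameter values below are
# illustrative assumptions, not the settings of any reported experiment; it
# requires the TF1-style API used throughout this file). Reuses the small
# network from _demo_fr_rnn above.
def _demo_construct_tf():
    demo_settings = {'T': 100, 'taus': [4, 20], 'DeltaT': 1,
                     'task': 'flip', 'bits': 3, 'batches': 8}
    demo_training_params = {'learning_rate': 0.01, 'activation': 'sigmoid',
                            'loss_fn': 'l2'}
    net = _demo_fr_rnn()
    return construct_tf(net, demo_settings, demo_training_params)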
'''
DEFINE LOSS AND OPTIMIZER
'''
class loss_op:
def __init__(self, o, z, training_params, hvd):
self.o = o
self.z = z
self.global_step = tf.train.get_or_create_global_step()
self.training_params = training_params
self.hvd = hvd
def loss_op(self):
"""
Method to define loss and optimizer for ONLY ONE target signal
INPUT
o: list of output values
z: target values
training_params: dictionary containing training parameters
learning_rate: learning rate
OUTPUT
loss: loss function
training_op: optimizer
"""
# Loss function
# print(z.shape)
# print(tf.stack(tf.squeeze(o)))
loss = tf.zeros(1)
# print(o[0])
loss_fn = self.training_params['loss_fn']
# for i in range(0, len(o)):
# if loss_fn.lower() == 'l1':
# loss += tf.norm(o[i] - z[i])
# elif loss_fn.lower() == 'l2':
# loss += tf.reduce_sum(tf.squared_difference(o[i], z[i]))
# if loss_fn.lower() == 'l2':
# loss = tf.sqrt(loss)
self.loss = tf.reduce_mean(tf.squared_difference(self.o, self.z[:-1,:]))
# Optimizer function
with tf.name_scope('ADAM'):
optimizer = tf.train.AdamOptimizer(learning_rate = self.training_params['learning_rate']*self.hvd.size())
# # decay = tf.train.exponential_decay(self.training_params['learning_rate'], self.global_step, 128, 0.9)
# # optimizer = tf.train.MomentumOptimizer(decay*hvd.size(), 0.9)
# # optimizer = tf.train.AdamOptimizer(decay*hvd.size(),epsilon=1e-1)
# optimizer = self.hvd.DistributedOptimizer( optimizer)
# # gradients, variables = zip(*optimizer.compute_gradients(self.loss,tf.trainable_variables()))
# # gradients = [None if gradient is None else tf.clip_by_norm(gradient, 0.1) for gradient in gradients]
# # self.training_op = optimizer.apply_gradients(zip(gradients, variables), global_step=self.global_step)
self.training_op = optimizer.minimize(self.loss,global_step=self.global_step)
return self.loss, self.training_op
'''
EVALUATE THE TRAINED MODEL
NOTE: NEEDS TO BE UPDATED!!
'''
def eval_tf(model_dir, settings, u):
"""
Method to evaluate a trained TF graph
INPUT
model_dir: full path to the saved model .mat file
settings: dictionary containing the task settings (T, stim_on, stim_dur, delay, DeltaT)
u: 12xT stimulus matrix
NOTE: There are 12 rows (one per dot pattern): 6 cues and 6 probes.
OUTPUT
o: 1xT output vector
"""
T = settings['T']
stim_on = settings['stim_on']
stim_dur = settings['stim_dur']
delay = settings['delay']
DeltaT = settings['DeltaT']
# Load the trained mat file
var = scipy.io.loadmat(model_dir)
# Get some additional params
N = var['N'][0][0]
exc_ind = [bool(i) for i in var['exc']]
# Get the delays
taus_gaus = var['taus_gaus']
taus = var['taus'][0] # tau [min, max]
taus_sig = (1/(1+np.exp(-taus_gaus))*(taus[1] - taus[0])) + taus[0]
# Synaptic currents and firing-rates
x = np.zeros((N, T)) # synaptic currents
r = np.zeros((N, T)) # firing-rates
x[:, 0] = np.random.randn(N, )/100
r[:, 0] = 1/(1 + np.exp(-x[:, 0]))
# r[:, 0] = np.minimum(np.maximum(x[:, 0], 0), 1) #clipped relu
# r[:, 0] = np.clip(np.minimum(np.maximum(x[:, 0], 0), 1), None, 10) #clipped relu
# r[:, 0] = np.clip(np.log(np.exp(x[:, 0])+1), None, 10) # softplus
# r[:, 0] = np.minimum(np.maximum(x[:, 0], 0), 6)/6 #clipped relu6
# Output
o = np.zeros((T, ))
o_counter = 0
# Recurrent weights and masks
# w = var['w0'] #!!!!!!!!!!!!
w = var['w']
m = var['m']
som_m = var['som_m']
som_N = var['som_N'][0][0]
# Identify excitatory/inhibitory neurons
exc = var['exc']
exc_ind = np.where(exc == 1)[0]
inh = var['inh']
inh_ind = np.where(inh == 1)[0]
som_inh_ind = inh_ind[:som_N]
for t in range(1, T):
# next_x is [N x 1]
ww = np.matmul(w, m)
ww = np.multiply(ww, som_m)
# next_x = (1 - DeltaT/tau)*x[:, t-1] + \
# (DeltaT/tau)*(np.matmul(ww, r[:, t-1]) + \
# np.matmul(var['w_in'], u[:, t-1])) + \
# np.random.randn(N, )/10
next_x = np.multiply((1 - DeltaT/taus_sig), np.expand_dims(x[:, t-1], 1)) + \
np.multiply((DeltaT/taus_sig), ((np.matmul(ww, np.expand_dims(r[:, t-1], 1)))\
+ np.matmul(var['w_in'], np.expand_dims(u[:, t-1], 1)))) +\
np.random.randn(N, 1)/10
x[:, t] = np.squeeze(next_x)
r[:, t] = 1/(1 + np.exp(-x[:, t]))
# r[:, t] = np.minimum(np.maximum(x[:, t], 0), 1)
# r[:, t] = np.clip(np.minimum(np.maximum(x[:, t], 0), 1), None, 10)
# r[:, t] = np.clip(np.log(np.exp(x[:, t])+1), None, 10) # softplus
# r[:, t] = np.minimum(np.maximum(x[:, t], 0), 6)/6
wout = var['w_out']
wout_exc = wout[0, exc_ind]
wout_inh = wout[0, inh_ind]
r_exc = r[exc_ind, :]
r_inh = r[inh_ind, :]
        o[o_counter] = np.matmul(wout, r[:, t])
        o_counter += 1
    return o
import numpy as np
def flatten(matrix):
flat_matrix = matrix.flatten()
if len(flat_matrix)==0:
flat_matrix = np.array([0])
return flat_matrix
def one_hot_and_reduce(categorical_array, one_hot_dim):
try:
one_hot_array = np.eye(one_hot_dim)[categorical_array]
agregated_array = np.sum(one_hot_array, axis=0)
except IndexError:
#print("index error : the one hot array will be replaced by an array of 0")
agregated_array = np.zeros(one_hot_dim)
return agregated_array
def preprocess_last_actions(array):
#Because 'last_actions' can have a variable length we need to add a
#default value of '0' when 'last_actions' length is '0' or '1' :
if len(array) == 0:
output = np.array([0, 0])
elif len(array) == 1 :
output = np.append(np.array([0]), array)
else:
output = array
return output
def preprocess_non_spatial(matrix, layer):
if len(matrix) == 0:
if layer == 'action':
output = np.array([0])
        elif layer == 'multi_select':
            output = np.array([[0,0,0,0,0,0,0]])
        else:
            output = matrix  # fall back so 'output' is always defined
    else:
        output = matrix
return output
def preprocess_game_loop(array):
array[array==0]=1
array = np.log(array)
return array
def preprocess_alerts(array):
if len(array) == 0 :
output = np.array([0, 0])
elif len(array) == 1:
output = np.append(array, np.array([0]))
elif len(array) == 2:
output = np.array(array)
else:
output = np.array(array[0:2])
return output
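# Hedged sanity checks (the expected values follow directly from the
# padding/truncation rules implemented above).
def _demo_preprocessing():
    assert np.array_equal(preprocess_alerts(np.array([])), np.array([0, 0]))
    assert np.array_equal(preprocess_alerts(np.array([7])), np.array([7, 0]))
    assert np.array_equal(preprocess_alerts(np.array([1, 2, 3])), np.array([1, 2]))
    assert np.array_equal(one_hot_and_reduce(np.array([0, 2, 2]), 4),
                          np.array([1., 0., 2., 0.]))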
def preprocess_quantitative_arrays(array, arg):
output_array = []
for index, value in enumerate(array):
if value > 0:
if arg == 'score_cumulative':
if index > 0:
value = np.log(value)
elif arg == 'player':
                value = np.log(value)
        output_array.append(value)
    return np.array(output_array)
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for gaussian_markov_chain module.
"""
import numpy as np
from ..gaussian_markov_chain import GaussianMarkovChain
from ..gaussian_markov_chain import VaryingGaussianMarkovChain
from ..gaussian import Gaussian, GaussianMoments
from ..gaussian import GaussianARD
from ..gaussian import GaussianGamma
from ..wishart import Wishart, WishartMoments
from ..gamma import Gamma, GammaMoments
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import misc
from bayespy.utils.misc import TestCase
def kalman_filter(y, U, A, V, mu0, Cov0, out=None):
"""
Perform Kalman filtering to obtain filtered mean and covariance.
The parameters of the process may vary in time, thus they are
given as iterators instead of fixed values.
Parameters
----------
y : (N,D) array
"Normalized" noisy observations of the states, that is, the
observations multiplied by the precision matrix U (and possibly
other transformation matrices).
U : (N,D,D) array or N-list of (D,D) arrays
Precision matrix (i.e., inverse covariance matrix) of the observation
noise for each time instance.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Filtered mean of the states.
Cov : array
Filtered covariance of the states.
See also
--------
rts_smoother
"""
mu = mu0
Cov = Cov0
# Allocate memory for the results
(N,D) = np.shape(y)
X = np.empty((N,D))
CovX = np.empty((N,D,D))
# Update step for t=0
M = np.dot(np.dot(Cov, U[0]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[0]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
X[0,:] = mu
CovX[0,:,:] = Cov
#for (yn, Un, An, Vn) in zip(y, U, A, V):
for n in range(len(y)-1): #(yn, Un, An, Vn) in zip(y, U, A, V):
# Prediction step
mu = np.dot(A[n], mu)
Cov = np.dot(np.dot(A[n], Cov), A[n].T) + V[n]
# Update step
M = np.dot(np.dot(Cov, U[n+1]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[n+1]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
# Force symmetric covariance (for numeric inaccuracy)
Cov = 0.5*Cov + 0.5*Cov.T
# Store results
X[n+1,:] = mu
CovX[n+1,:,:] = Cov
return (X, CovX)
def rts_smoother(mu, Cov, A, V, removethis=None):
"""
Perform Rauch-Tung-Striebel smoothing to obtain the posterior.
The function returns the posterior mean and covariance of each
state. The parameters of the process may vary in time, thus they
are given as iterators instead of fixed values.
Parameters
----------
mu : (N,D) array
Mean of the states from Kalman filter.
Cov : (N,D,D) array
Covariance of the states from Kalman filter.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Posterior mean of the states.
Cov : array
Posterior covariance of the states.
See also
--------
kalman_filter
"""
N = len(mu)
#n = N-1
# Start from the last time instance and smoothen backwards
x = mu[-1,:]
Covx = Cov[-1,:,:]
for n in reversed(range(N-1)):#(An, Vn) in zip(reversed(A), reversed(V)):
#n = n - 1
#if n <= 0:
# break
# The predicted value of n
x_p = np.dot(A[n], mu[n,:])
Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n]
# Temporary variable
S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:]))
# Smoothed value of n
x = mu[n,:] + np.dot(S.T, x-x_p)
Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S)
# Force symmetric covariance (for numeric inaccuracy)
Covx = 0.5*Covx + 0.5*Covx.T
# Store results
mu[n,:] = x
Cov[n,:] = Covx
return (mu, Cov)
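def _demo_kalman_rts():
    """Hedged sanity sketch for kalman_filter and rts_smoother above: filter
    short synthetic data with time-invariant parameters, then smooth.
    All values are illustrative."""
    N, D = 10, 2
    A = np.tile(np.identity(D), (N - 1, 1, 1))
    V = np.tile(0.1 * np.identity(D), (N - 1, 1, 1))
    U = np.tile(np.identity(D), (N, 1, 1))
    y = np.random.randn(N, D)  # already "normalized" observations
    (mu, Cov) = kalman_filter(y, U, A, V, np.zeros(D), np.identity(D))
    return rts_smoother(mu, Cov, A, V)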
class TestGaussianMarkovChain(TestCase):
def create_model(self, N, D):
# Construct the model
Mu = Gaussian(np.random.randn(D),
np.identity(D))
Lambda = Wishart(D,
random.covariance(D))
A = Gaussian(np.random.randn(D,D),
np.identity(D))
V = Gamma(D,
np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N)
Y = Gaussian(X, np.identity(D))
return (Y, X, Mu, Lambda, A, V)
def test_plates(self):
"""
Test that plates are handled correctly.
"""
def test_message_to_mu0(self):
pass
def test_message_to_Lambda0(self):
pass
def test_message_to_A(self):
pass
def test_message_to_v(self):
pass
def test_message_to_parents(self):
""" Check gradient passed to inputs parent node """
N = 3
D = 2
Mu = Gaussian(np.random.randn(D), random.covariance(D))
Lambda = Wishart(D, random.covariance(D))
A = Gaussian(np.random.randn(D,D), random.covariance(D))
V = Gamma(D, np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N+1)
Y = Gaussian(X, random.covariance(D))
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
self.assert_message_to_parent(X, Mu, eps=1e-8)
self.assert_message_to_parent(
X,
Lambda,
eps=1e-8,
postprocess=lambda u: [
u[0] + linalg.transpose(u[0], ndim=1),
u[1],
]
)
self.assert_message_to_parent(X, A)
self.assert_message_to_parent(X, V, eps=1e-10, atol=1e-5)
pass
def test_message_to_parents_with_inputs(self):
""" Check gradient passed to inputs parent node """
def check(Mu, Lambda, A, V, U):
X = GaussianMarkovChain(Mu, Lambda, A, V, inputs=U)
Y = Gaussian(X, random.covariance(D))
# Check moments
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
X.update()
# Check gradient messages to parents
self.assert_message_to_parent(X, Mu)
self.assert_message_to_parent(
X,
Lambda,
postprocess=lambda phi: [
phi[0] + linalg.transpose(phi[0], ndim=1),
phi[1]
]
)
self.assert_message_to_parent(
X,
A,
postprocess=lambda phi: [
phi[0],
phi[1] + linalg.transpose(phi[1], ndim=1),
]
)
self.assert_message_to_parent(X, V)
self.assert_message_to_parent(X, U)
N = 4
D = 2
K = 3
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
Gaussian(
np.random.randn(D,D+K),
random.covariance(D+K)
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
GaussianGamma(
np.random.randn(D,D+K),
random.covariance(D+K),
D,
np.random.rand(D),
ndim=1
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
pass
def test_message_to_child(self):
"""
Test the updating of GaussianMarkovChain.
Check that the moments and the lower bound contribution are computed
correctly.
"""
# TODO: Add plates and missing values!
# Dimensionalities
D = 3
N = 5
(Y, X, Mu, Lambda, A, V) = self.create_model(N, D)
# Inference with arbitrary observations
y = np.random.randn(N,D)
Y.observe(y)
X.update()
(x_vb, xnxn_vb, xpxn_vb) = X.get_moments()
# Get parameter moments
(mu0, mumu0) = Mu.get_moments()
(icov0, logdet0) = Lambda.get_moments()
(a, aa) = A.get_moments()
(icov_x, logdetx) = V.get_moments()
icov_x = np.diag(icov_x)
# Prior precision
Z = np.einsum('...kij,...kk->...ij', aa, icov_x)
U_diag = [icov0+Z] + (N-2)*[icov_x+Z] + [icov_x]
U_super = (N-1) * [-np.dot(a.T, icov_x)]
U = misc.block_banded(U_diag, U_super)
# Prior mean
mu_prior = np.zeros(D*N)
mu_prior[:D] = np.dot(icov0,mu0)
# Data
Cov = np.linalg.inv(U + np.identity(D*N))
mu = np.dot(Cov, mu_prior + y.flatten())
# Moments
xx = mu[:,np.newaxis]*mu[np.newaxis,:] + Cov
mu = np.reshape(mu, (N,D))
xx = np.reshape(xx, (N,D,N,D))
# Check results
self.assertAllClose(x_vb, mu,
msg="Incorrect mean")
for n in range(N):
self.assertAllClose(xnxn_vb[n,:,:], xx[n,:,n,:],
msg="Incorrect second moment")
for n in range(N-1):
self.assertAllClose(xpxn_vb[n,:,:], xx[n,:,n+1,:],
msg="Incorrect lagged second moment")
# Compute the entropy H(X)
ldet = linalg.logdet_cov(Cov)
H = random.gaussian_entropy(-ldet, N*D)
# Compute <log p(X|...)>
xx = np.reshape(xx, (N*D, N*D))
mu = np.reshape(mu, (N*D,))
ldet = -logdet0 - np.sum(np.ones((N-1,D))*logdetx)
P = random.gaussian_logpdf(np.einsum('...ij,...ij',
xx,
U),
np.einsum('...i,...i',
mu,
mu_prior),
np.einsum('...ij,...ij',
mumu0,
icov0),
-ldet,
N*D)
# The VB bound from the net
l = X.lower_bound_contribution()
self.assertAllClose(l, H+P)
# Compute the true bound <log p(X|...)> + H(X)
#
# Simple tests
#
def check(N, D, plates=None, mu=None, Lambda=None, A=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if A is None:
A = np.random.randn(D,D)
if V is None:
V = np.random.rand(D)
X = GaussianMarkovChain(mu,
Lambda,
A,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = Gaussian._ensure_moments(mu, GaussianMoments, ndim=1).get_moments()
(Lambda, _) = Wishart._ensure_moments(Lambda, WishartMoments, ndim=1).get_moments()
(a, aa) = Gaussian._ensure_moments(A, GaussianMoments, ndim=1).get_moments()
a = a * np.ones((N-1,D,D)) # explicit broadcasting for simplicity
aa = aa * np.ones((N-1,D,D,D)) # explicit broadcasting for simplicity
(v, _) = Gamma._ensure_moments(V, GammaMoments).get_moments()
v = v * np.ones((N-1,D))
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
aa[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
aa[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
a[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
a[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(4,1)
check(4,3)
#
# Test mu
#
# Simple
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test A
#
# Simple
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
# Plates on time axis
check(5,3,
A=GaussianARD(np.random.randn(4,3,3),
np.random.rand(4,3,3),
shape=(3,),
plates=(4,3)))
# Plates on time axis with broadcasted moments
check(5,3,
A=GaussianARD(np.random.randn(1,3,3),
np.random.rand(1,3,3),
shape=(3,),
plates=(4,3)))
check(5,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(4,3)))
# Plates
check(4,3,
A=GaussianARD(np.random.randn(5,6,1,3,3),
np.random.rand(5,6,1,3,3),
shape=(3,),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(5,1,3)))
check(4,3,
A=GaussianARD(np.random.randn(1,1,3,3),
np.random.rand(1,1,3,3),
shape=(3,),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(1,1,3)))
#
# Test v
#
# Simple
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
V=Gamma(np.random.rand(3),
np.random.rand(3),
plates=(3,)))
# Plates
check(4,3,
V=Gamma(np.random.rand(5,6,1,3),
np.random.rand(5,6,1,3),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(5,1,3)))
check(4,3,
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(1,1,3)))
#
# Check with input signals
#
mu = 2
Lambda = 3
A = 4
B = 5
v = 6
inputs = [[-2], [3]]
X = GaussianMarkovChain([mu], [[Lambda]], [[A,B]], [v], inputs=inputs)
V = (np.array([[v*A**2, -v*A, 0],
[-v*A, v*A**2, -v*A],
[0, -v*A, 0]]) +
np.array([[Lambda, 0, 0],
[0, v, 0],
[0, 0, v]]))
m = (np.array([Lambda*mu, 0, 0]) +
np.array([0, v*B*inputs[0][0], v*B*inputs[1][0]]) -
np.array([v*A*B*inputs[0][0], v*A*B*inputs[1][0], 0]))
Cov = np.linalg.inv(V)
mean = np.dot(Cov, m)
X.update()
u = X.get_moments()
self.assertAllClose(u[0], mean[:,None])
self.assertAllClose(u[1] - u[0][...,None,:]*u[0][...,:,None],
Cov[(0,1,2),(0,1,2),None,None])
self.assertAllClose(u[2] - u[0][...,:-1,:,None]*u[0][...,1:,None,:],
Cov[(0,1),(1,2),None,None])
pass
def test_smoothing(self):
"""
Test the posterior estimation of GaussianMarkovChain.
Create time-variant dynamics and compare the results of BayesPy VB
inference and standard Kalman filtering & smoothing.
This is not that useful anymore, because the moments are checked much
better in another test method.
"""
#
# Set up an artificial system
#
# Dimensions
N = 500
D = 2
# Dynamics (time varying)
A0 = np.array([[.9, -.4], [.4, .9]])
A1 = np.array([[.98, -.1], [.1, .98]])
l = np.linspace(0, 1, N-1).reshape((-1,1,1))
A = (1-l)*A0 + l*A1
# Innovation covariance matrix (time varying)
v = np.random.rand(D)
V = np.diag(v)
# Observation noise covariance matrix
C = np.identity(D)
#
# Simulate data
#
X = np.empty((N,D))
Y = np.empty((N,D))
x = np.array([0.5, -0.5])
X[0,:] = x
Y[0,:] = x + np.random.multivariate_normal(np.zeros(D), C)
for n in range(N-1):
x = np.dot(A[n,:,:],x) + np.random.multivariate_normal(np.zeros(D), V)
X[n+1,:] = x
Y[n+1,:] = x + np.random.multivariate_normal(np.zeros(D), C)
#
# BayesPy inference
#
# Construct VB model
Xh = GaussianMarkovChain(np.zeros(D), np.identity(D), A, 1/v, n=N)
Yh = Gaussian(Xh, np.identity(D), plates=(N,))
# Put data
Yh.observe(Y)
# Run inference
Xh.update()
# Store results
Xh_vb = Xh.u[0]
CovXh_vb = Xh.u[1] - Xh_vb[...,np.newaxis,:] * Xh_vb[...,:,np.newaxis]
#
# "The ground truth" using standard Kalman filter and RTS smoother
#
V = N*(V,)
UY = Y
U = N*(C,)
(Xh, CovXh) = kalman_filter(UY, U, A, V, np.zeros(D), np.identity(D))
(Xh, CovXh) = rts_smoother(Xh, CovXh, A, V)
#
# Check results
#
self.assertTrue(np.allclose(Xh_vb, Xh))
self.assertTrue(np.allclose(CovXh_vb, CovXh))
class TestVaryingGaussianMarkovChain(TestCase):
def test_plates_from_parents(self):
"""
Test that VaryingGaussianMarkovChain deduces plates correctly
"""
def check(plates_X,
plates_mu=(),
plates_Lambda=(),
plates_B=(),
plates_S=(),
plates_v=()):
D = 3
K = 2
N = 4
np.random.seed(42)
mu = Gaussian(np.random.randn(*(plates_mu+(D,))),
random.covariance(D))
Lambda = Wishart(D+np.ones(plates_Lambda),
random.covariance(D))
B = GaussianARD(np.random.randn(*(plates_B+(D,D,K))),
1+np.random.rand(*(plates_B+(D,D,K))),
shape=(D,K),
plates=plates_B+(D,))
S = GaussianARD(np.random.randn(*(plates_S+(N,K))),
1+np.random.rand(*(plates_S+(N,K))),
shape=(K,),
plates=plates_S+(N,))
v = Gamma(1+np.random.rand(*(plates_v+(1,D))),
1+np.random.rand(*(plates_v+(1,D))))
X = VaryingGaussianMarkovChain(mu, Lambda, B, S, v, name="X")
self.assertEqual(plates_X, X.plates,
msg="Incorrect plates deduced")
pass
check(())
check((2,3),
plates_mu=(2,3))
check((6,7),
plates_Lambda=(6,7))
check((2,3),
plates_B=(2,3))
check((2,3),
plates_S=(2,3))
check((2,3),
plates_v=(2,3))
pass
def test_message_to_child(self):
# A very simple check before the more complex ones:
# 1-D process, k=1, fixed constant parameters
m = 1.0
l = 4.0
b = 2.0
s = [3.0, 8.0]
v = 5.0
X = VaryingGaussianMarkovChain([m],
[[l]],
[[[b]]],
[[s[0]],[s[1]]],
[v])
(u0, u1, u2) = X._message_to_child()
C = np.array([[l+b**2*s[0]**2*v, -b*s[0]*v, 0],
[ -b*s[0]*v, v+b**2*s[1]**2*v, -b*s[1]*v],
[ 0, -b*s[1]*v, v]])
Cov = np.linalg.inv(C)
m0 = np.dot(Cov, [[l*m], [0], [0]])
m1 = np.diag(Cov)[:,None,None] + m0[:,:,None]**2
m2 = np.diag(Cov, k=1)[:,None,None] + m0[1:,:,None]*m0[:-1,:,None]
self.assertAllClose(m0, u0)
self.assertAllClose(m1, u1)
self.assertAllClose(m2, u2)
def check(N, D, K, plates=None, mu=None, Lambda=None, B=None, S=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if B is None:
B = np.random.randn(D,D,K)
if S is None:
S = np.random.randn(N-1,K)
if V is None:
V = np.random.rand(D)
X = VaryingGaussianMarkovChain(mu,
Lambda,
B,
S,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = X.parents[0].get_moments()
(Lambda, _) = X.parents[1].get_moments()
(b, bb) = X.parents[2].get_moments()
(s, ss) = X.parents[3].get_moments()
(v, _) = X.parents[4].get_moments()
v = v * np.ones((N-1,D))
#V = np.atleast_3d(v)[...,-1,:,None]*np.identity(D)
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
#m = np.reshape(m, plates_mu + (N*D,))
A = np.einsum('...dik,...nk->...ndi', b, s)
AA = np.einsum('...dikjl,...nkl->...ndij', bb, ss)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
AA[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
AA[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
A[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
A[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(2,1,1)
check(2,3,1)
check(2,1,3)
check(4,3,2)
#
# Test mu
#
# Simple
check(4,3,2,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,2,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,2,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,2,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,2,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,2,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,2,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test B
#
# Simple
check(4,3,2,
              B=GaussianARD(np.random.randn(3,3,2),
                            np.random.rand(3,3,2),
                            # shape/plates follow the pattern of the A-checks above
                            shape=(3,2),
                            plates=(3,)))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper file containing activation functions
"""
import numpy as np
def sigmoid(x):
"""Description: Calculates the sigmoid for each value in the the input array
Params:
x: Array for which sigmoid is to be calculated
Returns:
ndarray: Sigmoid of the input
"""
return 1.0 / (1.0 + np.exp(-x))
def delta_sigmoid(x):
"""Description: Calculates the sigmoid derivative for the input array
Params:
x: Array for which sigmoid derivative is to be calculated
Returns:
ndarray: Sigmoid derivative of the input
"""
return sigmoid(x) * (1 - sigmoid(x))
def softmax(x):
"""Description: Calculates softmax for each set of scores in the input array
Params:
x: Array for which softmax is to be calculated
(axis_0 is the feature dimension, axis_1 is the n_samples dim)
Returns:
ndarray: Softmax of the input
"""
    e_x = np.exp(x - np.max(x, axis=0))
    return e_x / np.sum(e_x, axis=0)
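def _demo_activations():
    """Hedged sanity checks for the helpers above (illustrative values)."""
    x = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(sigmoid(0.0), 0.5)
    assert np.allclose(delta_sigmoid(x), sigmoid(x) * (1 - sigmoid(x)))
    scores = np.random.randn(4, 5)  # 4 features, 5 samples
    assert np.allclose(softmax(scores).sum(axis=0), 1.0)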
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 12:16:04 2019
@author: gryang
"""
import os
import sys
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
rootpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(rootpath)
import tools
mpl.rcParams['font.size'] = 7
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['font.family'] = 'arial'
# mpl.rcParams['text.usetex'] = 'true'
mpl.rcParams['mathtext.fontset'] = 'stix'
def load_optimal_K(filename, v_name):
print(filename)
with open(filename, "rb") as f:
# values is a dictionary of lists
values = pickle.load(f)
# print(values[0]['dim'])
# TODO: TEMPORARY HACK to make withdim analysis work
if isinstance(values, list):
values = values[0]
for key, val in values.items():
values[key] = np.array(val)
choose = np.argmax if v_name in ['dim'] else np.argmin
optimal_Ks = list()
for ind in np.unique(values['ind']): # repetition indices
idx = values['ind'] == ind # idx of current repetition index
v_vals = values[v_name][idx]
optimal_Ks.append(values['K'][idx][choose(v_vals)])
means = [np.mean(
np.random.choice(optimal_Ks, size=len(optimal_Ks), replace=True)) for _
in range(1000)]
optimal_K = np.mean(optimal_Ks)
conf_int = np.percentile(means, [2.5, 97.5])
K_range = np.unique(values['K'])
return optimal_K, conf_int, K_range
def get_sparsity_from_training(path):
import standard.analysis_pn2kc_training as analysis_pn2kc_training
dirs = [os.path.join(path, n) for n in os.listdir(path)]
sparsitys = list()
n_ors = list()
for i, d in enumerate(dirs):
config = tools.load_config(d)
print('N: ', config.N_PN)
sparsity = analysis_pn2kc_training.compute_sparsity(
d, epoch=-1, dynamic_thres=False, visualize=True)
n_ors.append(config.N_PN)
sparsitys.append(sparsity[sparsity>0].mean())
print('Prop neurons with zero-weights: {:0.3f}'.format(np.mean(sparsity==0)))
n_ors = np.array(n_ors)
sparsitys = np.array(sparsitys)
indsort = np.argsort(n_ors)
return sparsitys[indsort], n_ors[indsort]
def _load_result(filename, v_name='theta'):
dirs = os.listdir(os.path.join(rootpath, 'files', 'analytical'))
xs = [int(d[len(filename):-len('.pkl')]) for d in dirs if filename in d]
xs = np.sort(xs)
optimal_Ks = list()
conf_ints = list()
yerr_low = list()
yerr_high = list()
for value in xs:
fn = filename + str(value)
_filename = './files/analytical/' + fn + '.pkl'
optimal_K, conf_int, K_range = load_optimal_K(_filename, v_name=v_name)
# print('m:' + str(value))
print('optimal K:' + str(optimal_K))
print('confidence interval: ' + str(conf_int))
print('K range: ' + str(K_range))
print('')
optimal_Ks.append(optimal_K)
conf_ints.append(conf_int)
yerr_low.append(optimal_K-conf_int[0])
yerr_high.append(conf_int[1]-optimal_K)
return xs, np.array(optimal_Ks)
def load_result(filenames, v_name='theta'):
optimal_Ks = list()
conf_ints = list()
yerr_low = list()
yerr_high = list()
for filename in filenames:
optimal_K, conf_int, K_range = load_optimal_K(filename, v_name=v_name)
print('Load results from ' + filename)
# print('m:' + str(value))
print('optimal K:' + str(optimal_K))
print('confidence interval: ' + str(conf_int))
print('K range: ' + str(K_range))
print('')
optimal_Ks.append(optimal_K)
conf_ints.append(conf_int)
yerr_low.append(optimal_K - conf_int[0])
yerr_high.append(conf_int[1] - optimal_K)
conf_ints = np.array(conf_ints)
return np.array(optimal_Ks), conf_ints
def _fit(x, y):
# x_fit = np.linspace(x[0], x[-1], 100)
x_fit = np.linspace(min(np.log(50),x[0]), max(np.log(1000),x[-1]), 100)
# model = Ridge()
model = LinearRegression()
model.fit(x[:, np.newaxis], y)
y_fit = model.predict(x_fit[:, np.newaxis])
return x_fit, y_fit, model
def main():
x, y = _load_result('all_value_m', v_name='theta')
x, y = np.log(x), np.log(y)
x_fit, y_fit, model = _fit(x, y)
res_perturb = {'log_N': x, 'log_K': y, 'label': 'Weight robustness'}
res_perturb_fit = {'log_N': x_fit, 'log_K': y_fit, 'model': model,
'label': r'$K ={:0.2f} \ N^{{{:0.2f}}}$'.format(
np.exp(model.intercept_), model.coef_[0])}
x, y = _load_result('all_value_withdim_m', v_name='dim')
x, y = np.log(x), np.log(y)
x_fit, y_fit, model = _fit(x, y)
res_dim = {'log_N': x, 'log_K': y}
res_dim_fit = {'log_N': x_fit, 'log_K': y_fit, 'model': model,
'label': r'$K ={:0.2f} \ N^{{{:0.2f}}}$'.format(
np.exp(model.intercept_), model.coef_[0])}
# Get results from training
path = os.path.join(rootpath, 'files', 'vary_n_orn2')
sparsitys, n_ors = get_sparsity_from_training(path)
ind_show = (n_ors>=50) * (n_ors<500)
# TODO: The smaller than 500 is just because N=500 didn't finish training
x, y = n_ors[ind_show], sparsitys[ind_show]
print(x, y)
# x = [50, 100, 200]
# y = [7.3, 10.17, 18.3]
# y[np.where(x==100)[0][0]] = 13.6
# y[np.where(x==200)[0][0]] = 16
# # TODO: TEMPORARY!!
# x, y = np.array([50, 100, 200]), np.array([7, 17, 31])
res_train = {'log_N': np.log(x),
'log_K': np.log(y), 'label': 'Train'}
x, y = res_train['log_N'], res_train['log_K']
x_fit = np.linspace(np.log(50), np.log(1000), 3)
model = LinearRegression()
model.fit(x[:, np.newaxis], y)
y_fit = model.predict(x_fit[:, np.newaxis])
res_train_fit = {'log_N': x_fit, 'log_K': y_fit, 'model': model,
'label': r'$K ={:0.2f} \ N^{{{:0.2f}}}$'.format(
np.exp(model.intercept_), model.coef_[0])}
file = os.path.join(rootpath, 'files', 'analytical', 'optimal_k_two_term')
with open(file+'.pkl', 'rb') as f:
res_twoterm = pickle.load(f)
ind = (res_twoterm['ms'] >= 50) * (res_twoterm['ms'] <= 1000)
res_twoterm['log_N'] = np.log(res_twoterm['ms'][ind])
res_twoterm['log_K'] = np.log(res_twoterm['optimal_Ks'])[ind]
fig = plt.figure(figsize=(4, 3.))
ax = fig.add_axes([0.2, 0.2, 0.7, 0.7])
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
res_list = [res_train, res_perturb, res_perturb_fit, res_twoterm,
res_dim, res_dim_fit]
labels = ['Train', 'Weight robustness', res_perturb_fit['label'],
'Two-term approx.', 'Dimensionality', res_dim_fit['label']]
markers = ['+', 'o', '-', '-', 'o', '-']
mss = [8, 4, 4, 4, 4, 4]
zorders = [5, 4, 3, 2, 1, 0]
colors = ['black', tools.red, tools.red, tools.red*0.5, tools.gray, tools.gray]
for i, res in enumerate(res_list):
ax.plot(res['log_N'], res['log_K'], markers[i], ms=mss[i],
label=labels[i], color=colors[i], zorder=zorders[i])
ax.plot(np.log(1000), np.log(100), 'x', color=tools.darkblue)
ax.text(np.log(900), np.log(120), '(N=1000, K=100)', color=tools.darkblue)  # placeholder label; the original annotation string was truncated
## @package teetool
# This module contains the Visual_2d class
#
# See Visual_2d class for more details
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import teetool as tt
## Visual_2d class generates the 2d output using Matplotlib
#
# Even 3-dimensional trajectories can be output in 2d (sliced)
class Visual_2d(object):
## Constructor for Visual_2d
# @param self object pointer
# @param thisWorld World object, filled with trajectory data and models
# @param kwargs additional parameters for plt.figure()
def __init__(self, thisWorld, **kwargs):
"""
<description>
"""
## figure object
self._fig = plt.figure(facecolor="white", **kwargs)
## axis object
self._ax = self._fig.gca()
# set colour of axis
#self._ax.set_axis_bgcolor('white')
#self._ax.set_facecolor('white')
## World object
self._world = thisWorld
## Labels of plots
self._labels = []
## Plot mean of trajectories
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotMean(self, list_icluster=None, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
Y = this_cluster["model"].getMean()
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[list_icluster[i]],
**kwargs)
## Plot trajectories of cluster
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectories(self,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot trajectories of cluster
# @param self object pointer
# @param x1 point from [0,1] to visualise
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectoriesPoints(self,
x1,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# obtain points
clustersP = self._world.getClusterPoints(x1, list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, A) in enumerate(clustersP):
# pass clusters
for itraj, a in enumerate(A):
a_line, = self._ax.plot(a[0],
a[1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot time-series of trajectories
# @param self object pointer
# @param icluster select cluster to plot
# @param idim select dimension to plot
# @param ntraj maximum number of trajectories
# @param colour specificy colour of trajectories
# @param kwargs additional parameters for plotting
def plotTimeSeries(self, icluster=0, idim=0, ntraj=50,
colour='k', **kwargs):
# number of subplots, 2 or 3
ndim = self._world._ndim
# subplot
#f, axarr = plt.subplots(ndim, sharex=True)
# check validity
[icluster] = self._world._check_list_icluster([icluster])
# extract data
clusters = self._world.getCluster([icluster])
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
#for d in range(ndim):
x_norm = (x - x.min()) / (x.max() - x.min())
a_line, = self._ax.plot(x_norm,
Y[:,idim],
color=colour, **kwargs)
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot a box based on two coordinates
# @param self object pointer
# @param coord_lowerleft lower-left coordinate (x,y)
# @param coord_upperright upper-right coordinate (x,y)
# @param kwargs additional parameters for plotting
def plotBox(self, coord_lowerleft, coord_upperright, **kwargs):
x_lo = coord_lowerleft[0]
x_hi = coord_upperright[0]
y_lo = coord_lowerleft[1]
y_hi = coord_upperright[1]
coords = np.array([[x_lo, y_lo],
[x_hi, y_lo],
[x_hi, y_hi],
[x_lo, y_hi],
[x_lo, y_lo]])
coords_x = coords[:,0]
coords_y = coords[:,1]
self._ax.plot(coords_x, coords_y, **kwargs)
## standard plotting function for Matplotlib
# @param self object pointer
# @param args additional arguments for plotting
# @param kwargs additional labeled parameters for plotting
def plot(self, *args, **kwargs):
# plot
self._ax.plot(*args, **kwargs)
## Plot samples of model
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotSamples(self, list_icluster=None, ntraj=50, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, icluster) in enumerate(list_icluster):
these_samples = self._world.getSamples(icluster,
nsamples=ntraj)
for (x, Y) in these_samples:
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
linestyle=":",
**kwargs)
self._labels.append((a_line, "samples"))
## Add legend to plot
# @param self object pointer
def plotLegend(self):
list_lines = []
list_label = []
for (a_line, a_label) in self._labels:
list_lines.append(a_line)
list_label.append(a_label)
plt.legend(handles=list_lines, labels=list_label)
    ## Plots a confidence region at a given standard-deviation width
# @param self object pointer
# @param list_icluster list of clusters to plot
    # @param sdwidth number of standard deviations to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
    # @param resolution resolution at which to calculate the tube; either a single integer or per-dimension values [dim1 dim2] (2D) / [dim1 dim2 dim3] (3D)
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTube(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth,
z=z,
resolution=resolution)
# unique colours
lcolours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for i, ss1 in enumerate(ss_list):
#plt.contourf(xx, yy, 1.*ss1, levels=[-np.inf, 1., np.inf], colors=(lcolours[i],), alpha=alpha, **kwargs)
# plot an iso surface line
plt.contour(xx,
yy,
ss1,
levels=[.5],
colors=(lcolours[list_icluster[i]], 'w'),
**kwargs)
    ## Plots the difference between the confidence regions of two clusters
# @param self object pointer
# @param list_icluster list of 2 clusters to compare
    # @param sdwidth number of standard deviations to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTubeDifference(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract first two only!
list_icluster = list_icluster[:2]
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth, z=z,
resolution=resolution)
# to plot
ss_plot = - np.inf * np.ones_like(ss_list[0])
# 1 :: blocks added
ss_added = ((ss_list[0] - ss_list[1])==-1)
# 2 :: blocks removed
ss_removed = ((ss_list[0] - ss_list[1])==1)
# 3 :: present in both
ss_neutral = ((ss_list[0] + ss_list[1])==2)
ss_plot[ss_added] = 1.
ss_plot[ss_removed] = -1.
ss_plot[ss_neutral] = 0.
#plt.contourf(xx, yy, ss_plot, levels=[-np.inf, -1., 0., 1., np.inf], colors='none', hatches=['//', '.', '/'], **kwargs)
plt.contourf(xx,
yy,
ss_plot,
levels=[-np.inf, -1., 0., 1., np.inf],
colors=('r','b','g'),
alpha=alpha,
**kwargs)
for i in [1, 2, 3]:
if i == 1:
ss1 = 1.*ss_removed
color = 'r'
elif i == 2:
ss1 = 1.*ss_added
color = 'g'
elif i == 3:
ss1 = 1.*ss_neutral
color = 'b'
# plot an iso surface
plt.contour(xx, yy, ss1, levels=[0.5], colors=color)
    ## Plot the log-likelihood of confidence regions -- which can be related to traffic complexity in the future
# @param self object pointer
# @param list_icluster list of clusters to compare
# @param pmin minimum value on a normalised scale
# @param pmax maximum value on a normalised scale
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
def plotLogLikelihood(self,
list_icluster=None,
pmin=0, pmax=1,
z=None,
resolution=None):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
(ss_list, [xx, yy, zz]) = self._world.getLogLikelihood(list_icluster,
resolution,
z)
ss = ss_list[0] # initialise
for ss1 in ss_list:
# find those greater
mask = np.greater(ss1, ss)
# replace
ss[mask] = ss1[mask]
# normalise
        ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
import numpy as np
def remove_dependent_variables(x, tol=np.finfo(float).eps):
    """
    Find independent columns using QR decomposition. The returned solution
    might not be unique; other subsets of independent columns may exist.
    Strict requirement: number of rows (m) > number of columns (n).
:param x: The input numpy array
:param tol: Tolerance, variables less than tol are removed
:return: The linearly independent subset of variables
"""
r = np.linalg.matrix_rank(x)
n = x.shape[1]
    assert r != n, 'Matrix is already linearly independent'
q, r = np.linalg.qr(x)
ind = np.where(np.abs(r.diagonal()) > tol)[0]
return(ind, x[:, ind])
if __name__ == '__main__':
"""
Simple use case
"""
print('Define Matrix')
    A = np.array([[2, 4, 1, 3], [-1, -2, 1, 0], [0, 0, 4, 4], [3, 6, 2, 5]])
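    print('Find independent columns')
    # Completing the demo (assumed continuation): in A, column 1 equals
    # 2 * column 0 and column 3 equals column 0 + column 2, so the rank is 2
    # and the QR test should keep only two columns (up to the tolerance tol).
    ind, A_indep = remove_dependent_variables(A)
    print('Independent column indices:', ind)
    print(A_indep)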
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tools related to handling overlaps.
Currently implemented to be used with a minimap (https://github.com/lh3/minimap) file.
Includes overlap class, functions that create a sparse matrix from the overlaps,
and a function that computes the absolute positions of the reads from the overlaps in a contig
if the ordering of the reads is given.
@author: <NAME>
"""
import numpy as np
from scipy.sparse import find
class MiniOvl:
""" Overlap between two reads, named 1 and 2, from line of minimap file.
Such a line contains :
query name, length, 0-based start, end, strand,
target name, length, start, end, the number of matching bases.
Parameters
----------
mini_line : str (line from minimap file)
Attributes
----------
id1 : str (read id of read 1)
id2 : str (read id of read 2)
len1 : int (length of read 1)
len2 : int (length of read 2)
b1 : int (basepair number of the beginning of the overlap on read 1)
e1 : int (basepair number of the end of the overlap on read 1)
b2 : int (basepair number of the beginning of the overlap on read 2)
e2 : int (basepair number of the end of the overlap on read 2)
strand : char ('+' if the two reads are on same strand and '-' otherwise)
n_match : int (number of matching bases (see minimap [https://github.com/lh3/minimap] documentation))
"""
def __init__(self, mini_line):
fields = mini_line.split()
self.id1 = fields[0]
self.len1 = int(fields[1])
self.b1 = int(fields[2])
self.e1 = int(fields[3])
self.strand = fields[4]
self.id2 = fields[5]
self.len2 = int(fields[6])
self.b2 = int(fields[7])
self.e2 = int(fields[8])
self.n_match = int(fields[9])
# self.n_coll = int(fields[10])
# self.n_frac_match = int(fields[11])
def switch_ids(self, id1, id2):
""" Switch reads in the overlap object (read 1 becomes 2 and 2 becomes 1). """
if (self.id1 == id2) and (self.id2 == id1):
self.id1, self.id2 = self.id2, self.id1
self.len1, self.len2 = self.len2, self.len1
self.b1, self.b2 = self.b2, self.b1
self.e1, self.e2 = self.e2, self.e1
else:
assert self.id1 == id1 and self.id2 == id2, u"id1 : {}, id2 : {} \n self.id1 : {}, self.id2 : {}".format(
id1, id2, self.id1, self.id2)
def compute_abs_pos(self, b_ref, s_ref):
""" Compute absolute position and strand of read 2 from overlap information (self) and absolute
position and strand of read 1 (b_ref and s_ref).
Parameters
----------
        b_ref : int (absolute position (leftmost base coordinate) of read 1)
s_ref : int (+1 or -1. Absolute strand of read 1)
Returns
----------
b : int (absolute position (leftmost base coordinate) of read 2)
s : int (+1 or -1. Absolute strand of read 2)
"""
# Compute strand of next read
s = s_ref if self.strand == '+' else not(s_ref)
# Compute leftmost position (depending of strands of reference and next read)
if (s_ref and s):
b = b_ref + self.b1 - self.b2
elif (s_ref and not(s)):
b = b_ref + self.b1 - (self.len2 - self.e2)
elif (not(s_ref) and s):
b = b_ref + (self.len1 - self.e1) - self.b2
elif (not(s_ref) and not(s)):
b = b_ref + (self.len1 - self.e1) - (self.len2 - self.e2)
return (b, s)
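    # Worked example (hypothetical numbers): if read 1 sits at absolute
    # position b_ref=5000 on the forward strand (s_ref=True) and the overlap
    # has strand '+', b1=100, b2=0, then the first branch above gives
    #   b = 5000 + 100 - 0 = 5100, s = True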
def compute_overlaps(mini_fn, record_list):
""" Compute list of overlaps from minimap output file and list of reads.
Parameters
----------
mini_fn : str (path to minimap file)
record_list : list (list of reads in Bio.SeqIO.records format)
Returns
----------
read_nb2id : dict (keys : read number, values : read id)
ovl_list : list (of overlaps as MiniOvl objects)
i_list : list (of read indices (int) i to build sparse coo_matrix such that A[i,j] ~ overlap between reads i and j)
j_list : list (of read indices (int) j to build sparse coo_matrix such that A[i,j] ~ overlap between reads i and j)
k_list : list (of indices (int) k such that ovl_list[k] is the overlap between i_list[k] and j_list[k])
n_match_list : list (of number of matches (int) such that A[i,j] = number of matches between i and j)
ovl_len_list : list (of length of overlap between i and j)
n_reads : int (number of reads)
"""
# Construct {read name : read number} dictionary
read_nb_dic = {}
cpt = 0
for record in record_list:
        if record.id in read_nb_dic:
            msg = "Same id {} for reads {} and {} ! " \
            "Run [https://github.com/antrec/spectrassembler/]check_reads.py "\
            "on your data first.".format(record.id, read_nb_dic[record.id], cpt)
            raise ValueError(msg)
read_nb_dic[record.id] = cpt
cpt += 1
n_reads = cpt
idx = 0
h_list = []
k_list = []
ovl_list = []
n_match_list = []
ovl_len_list = []
    fh = open(mini_fn, 'r')
for line in fh:
ovl = MiniOvl(line)
i_idx = read_nb_dic[ovl.id1]
j_idx = read_nb_dic[ovl.id2]
# Discard self matches
if i_idx == j_idx:
continue
# Keep 1D indexing : h = n*i + j
h_idx = n_reads*i_idx + j_idx
# Check if another overlap between i and j already exists
duplicate_cond = (h_idx in h_list[-300:])
if duplicate_cond:
dupl_idx = h_list[-300:].index(h_idx) + len(h_list) - min(300, len(h_list))
dupl_ovl = ovl_list[dupl_idx]
# Drop the overlap if the preexisting one is more significant
if dupl_ovl.n_match > ovl.n_match:
continue
# Replace the preexisting overlap by the new one otherwise
            else:
                ovl_list[dupl_idx] = ovl
                n_match_list[dupl_idx] = ovl.n_match
                ovl_len = (abs(ovl.e1 - ovl.b1)
                           + abs(ovl.e2 - ovl.b2))/2
                ovl_len_list[dupl_idx] = ovl_len
                continue
# Add the overlap if there was no other overlap between i and j
ovl_list.append(ovl)
h_list.append(h_idx)
k_list.append(idx)
idx += 1
n_match_list.append(ovl.n_match)
ovl_len = (abs(ovl.e1 - ovl.b1) + abs(ovl.e2 - ovl.b2))/2
ovl_len_list.append(ovl_len)
fh.close()
# Convert to numpy arrays
h_list = np.array(h_list)
    n_match_list = np.array(n_match_list)
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
import rospy
import rosbag
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from mav_msgs.msg import Actuators
from waypoint_generation_library import WaypointGen
# TODO: make this critically damped by tuning the natural frequency
class PDControl(object):
""" Takes IMU and position data and publishes actuator commands based off a Proportional Derivative law"""
def __init__(self):
self.dlqrPublisher = rospy.Publisher("/uams/command/motor_speed", Actuators, queue_size=1)
# self.dlqrPublisher = rospy.Publisher("/neo11/command/motor_speed", Actuators, queue_size=1)
self.receivedImuQuat = Quaternion()
self.thrustConstant = 1.269e-05
self.momentConstant = 0.016754
self.g = 9.8 # [m/s^2]
self.m = 4.88 # [kg]
self.Ixx = 6.08870e-02 # [kg*m^2]
self.Iyy = 6.87913e-02 # [kg*m^2]
self.Izz = 1.48916e-01 # [kg*m^2]
gamma = self.thrustConstant / self.momentConstant
self.L = 0.2895 # [m]
# damping ratio (overdamped)
zeta = 2
zetaYaw = 1
# natural frequency
self.PI = 3.14159
wnAng = 13 # [rad/s]
wnAngYaw = 200
# attitude control gains calculation based on 2nd order system assumption
# proportional gain
# self.kpAngle = np.array(([self.Ixx * pow(wnAng, 2), # roll
# self.Iyy * pow(wnAng, 2), # pitch
# self.Izz * pow(wnAngYaw, 2)])) # yaw
# self.kpAngle = np.array([11.2, 11.2, 5713.2])
# self.kdAngle = np.array([ 1.12, 1.12, 16.56])
# self.kpAngle = np.array([11.2, 11.2, 5000])
# self.kdAngle = np.array([1.12, 1.12, 16.56])
self.kpAngle = np.array([20, 20, 5000])
self.kdAngle = np.array([11, 11, 160])
print(self.kpAngle)
# derivative gain
# self.kdAngle = np.array(([self.Ixx * zeta * wnAng, # roll
# self.Iyy * zeta * wnAng, # pitch
# self.Izz * 0.5 * zetaYaw * wnAngYaw])) # yaw
print(self.kdAngle)
# position control gains hand-tuned
# proportional gain
self.kpPos = np.array(([0.1, 0.1, 1]))
# derivative gain
self.kdPos = np.array(([0.1, 0.1, 1]))
# variable to keep track of the previous error in each state
        self.prevRPYErr = np.zeros((3, 1))
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
######################## -*- coding: utf-8 -*-
"""Usage: plotfc.py INPUTFILE
Simple script to visualize output of m1qn3 with omode>0 as saved in INPUTFILE.
The script plots the cost function value minus the final (smallest) value
and the number of simulations as a function of iterations.
"""
import matplotlib.pyplot as plt
import numpy as np
import sys
from getopt import gnu_getopt as getopt
# parse command-line arguments
try:
optlist,args = getopt(sys.argv[1:], ':', ['verbose'])
assert len(args) == 1
except (AssertionError):
sys.exit(__doc__)
fname=args[0]
print("reading from "+fname)
def get_output (fname, mystring):
"""parse fname and get some numbers out"""
iters = []
simuls= []
fc = []
try:
f=open(fname)
except:
print(fname + " does not exist, continuing")
else:
for line in f:
if mystring in line:
ll = line.split()
iters.append( int(ll[2].replace(',','')))
simuls.append(int(ll[4].replace(',','')))
fc.append( float(ll[6].replace('D','e').replace(',','')))
return iters, simuls, fc
iters, simuls, fc = get_output(fname, "f=")
# sort out restarts
iters0 = np.asarray(iters)
# Copyright (C) 2020 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
from gwbench.basic_constants import time_fac, REarth, AU, cLight
cos = np.cos
sin = np.sin
exp = np.exp
PI = np.pi
ap_symbs_string = 'f Mc tc ra dec psi gmst0'
locs = ('H', 'L', 'V', 'K', 'I', 'ET1', 'ET2', 'ET3', 'C', 'N', 'S')
#-----Check, location generation-----
def check_loc_gen(loc):
'''Check, what generation the locations is and return appropriate label.'''
if loc in ('H','L','V','K','I'):
return '2G'
elif loc in ('C','N','S','ET1','ET2','ET3'):
return '3G'
def detector_response(f,hf_pl,hf_cr,Mc,tc,ra,dec,psi,gmst0,loc,use_rot):
# input: f frequency domain [Hz]
# Mc chirp Mass [solar mass]
# tc time of coalescence [s]
# dec declination [rad]
# ra right ascencsion [rad]
# psi polarization angle [rad]
# gmst0 GreenwichMeanSiderialTime according to LAL
# loc location (and implied orientation) of a detector
# use_rot use frequency dependent time due to rotation of earth and SPA
#
# output: hf detector strain
Fp, Fc, Flp = antenna_pattern_and_loc_phase_fac(f,Mc,tc,ra,dec,psi,gmst0,loc,use_rot)
return Flp * (Fp * hf_pl + Fc * hf_cr)
def antenna_pattern_and_loc_phase_fac(f,Mc,tc,ra,dec,psi,gmst0,loc,use_rot):
# input: f frequency domain [Hz]
# Mc chirp Mass [solar mass]
# tc time of coalescence [s]
# dec declination [rad]
# ra right ascencsion [rad]
# psi polarization angle [rad]
# gmst0 GreenwichMeanSiderialTime according to LAL
# loc location (and implied orientation) of a detector
# use_rot use frequency dependent time due to rotation of earth and SPA
#
# output: Fp, Fc
half_period = 4.32e4
R = REarth
D, d = det_ten_and_loc_vec(loc, R)
if use_rot:
tf = tc - (5./256.)*(time_fac*Mc)**(-5./3.)*(PI*f)**(-8./3.)
else:
tf = 0
gra = (gmst0 + tf*PI/half_period) - ra
theta = PI/2. - dec
if isinstance(gra, np.ndarray):
r = np.array((cos(gra) * sin(theta), sin(gra) * sin(theta), cos(theta) * np.ones(len(gra))))
XX = np.transpose(np.array([ -cos(psi)*sin(gra) - sin(psi)*cos(gra)*sin(dec), -cos(psi)*cos(gra) + sin(psi)*sin(gra)*sin(dec), sin(psi)*cos(dec) * np.ones(len(gra)) ]))
YY = np.transpose(np.array([ sin(psi)*sin(gra) - cos(psi)*cos(gra)*sin(dec), sin(psi)*cos(gra) + cos(psi)*sin(gra)*sin(dec), cos(psi)*cos(dec) * np.ones(len(gra)) ]))
Fp = 0.5 * np.array([np.matmul(np.matmul(XX[i],D),XX[i]) - np.matmul(np.matmul(YY[i],D),YY[i]) for i in range(len(gra))])
Fc = 0.5 * np.array([np.matmul(np.matmul(XX[i],D),YY[i]) + np.matmul(np.matmul(YY[i],D),XX[i]) for i in range(len(gra))])
else:
r = np.array((cos(gra) * sin(theta), sin(gra) * sin(theta), cos(theta)))
XX = np.transpose(np.array([ -cos(psi)*sin(gra) - sin(psi)*cos(gra)*sin(dec), -cos(psi)*cos(gra) + sin(psi)*sin(gra)*sin(dec), sin(psi)*cos(dec) ]))
YY = np.transpose(np.array([ sin(psi)*sin(gra) - cos(psi)*cos(gra)*sin(dec), sin(psi)*cos(gra) + cos(psi)*sin(gra)*sin(dec), cos(psi)*cos(dec) ]))
Fp = 0.5 * (np.matmul(np.matmul(XX,D),XX) - np.matmul(np.matmul(YY,D),YY))
Fc = 0.5 * (np.matmul(np.matmul(XX,D),YY) + np.matmul(np.matmul(YY,D),XX))
return Fp, Fc, exp(1j * 2*PI * f * np.matmul(d,r))
def det_ten_and_loc_vec(loc, R):
i_vec = np.array((1,0,0))
j_vec = np.array((0,1,0))
k_vec = np.array((0,0,1))
et_vec2 = ( i_vec + np.sqrt(3.)*j_vec)/2.
et_vec3 = (-i_vec + np.sqrt(3.)*j_vec)/2.
alpha, beta, gamma = det_angles(loc)
EulerD1 = np.matmul(np.matmul(rot_mat(alpha,'k'), rot_mat(beta,'j')),rot_mat(gamma,'k'))
if loc in ('ET3','LISA3'):
eDArm1 = -1 * np.matmul(EulerD1,et_vec2)
        eDArm2 = -1 * np.matmul(EulerD1,et_vec3)
import os
import numpy as np
from rs_embed import EmbeddingData
POSE_DIR = '/app/data/pose'
POSE_PATH = os.path.join(POSE_DIR, 'pose_binary.bin')
ID_PATH = os.path.join(POSE_DIR, 'pose_ids.bin')
POSE_DIM = 390
def _load():
id_file_size = os.path.getsize(ID_PATH)
assert id_file_size % 8 == 0, \
'Id file size is not a multiple of sizeof(u64)'
n = int(id_file_size / 8)
emb_file_size = os.path.getsize(POSE_PATH)
assert emb_file_size % 4 == 0, \
        'Embedding file size is not a multiple of sizeof(f32)'
d = int((emb_file_size / 4) / (id_file_size / 8))
assert emb_file_size % d == 0, \
        'Embedding file size is not a multiple of d={}'.format(d)
emb_data = EmbeddingData(ID_PATH, POSE_PATH, POSE_DIM)
assert emb_data.count() == n, \
'Count does not match expected: {} != {}'.format(n, emb_data.count())
return emb_data
_POSE_DATA = _load()
class PoseWrapper():
def __init__(self, keypoints, pose_id, labeler):
self.kp = np.array(keypoints).reshape(130, 3)
self.id = pose_id
self.labeler = labeler
POSE_KEYPOINTS = 18
FACE_KEYPOINTS = 70
HAND_KEYPOINTS = 21
Nose = 0
Neck = 1
RShoulder = 2
RElbow = 3
RWrist = 4
LShoulder = 5
LElbow = 6
LWrist = 7
RHip = 8
RKnee = 9
RAnkle = 10
LHip = 11
LKnee = 12
LAnkle = 13
REye = 14
LEye = 15
REar = 16
LEar = 17
Background = 18
def pose_keypoints(self):
return self.kp[:self.POSE_KEYPOINTS, :]
def face_keypoints(self):
return self.kp[self.POSE_KEYPOINTS:(self.POSE_KEYPOINTS + self.FACE_KEYPOINTS), :]
def hand_keypoints(self):
base = self.kp[self.POSE_KEYPOINTS + self.FACE_KEYPOINTS:, :]
return [base[:self.HAND_KEYPOINTS, :], base[self.HAND_KEYPOINTS:, :]]
def get(pose_meta_qs):
"""Generator of PoseMeta objects -> list of PoseWrapper objects."""
pose_meta_qs = list(pose_meta_qs)
ids = [p.id for p in pose_meta_qs]
# get returns list of (id, pose bytes)
result = _POSE_DATA.get(ids)
assert len(result) == len(pose_meta_qs), "{} != {}".format(
len(result), len(pose_meta_qs))
    # reconstructed (assumed) continuation: pair each stored pose array with
    # its originating PoseMeta record
    return [
        PoseWrapper(np.array(pose_id_bytes[1]), pose_meta.id, pose_meta.labeler)
        for pose_id_bytes, pose_meta in zip(result, pose_meta_qs)
    ]
import sys
import numpy as np
import pytest
import polynomials_on_simplices.algebra.multiindex as multiindex
from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian
from polynomials_on_simplices.calculus.polynomial.polynomials_calculus import derivative
from polynomials_on_simplices.calculus.polynomial.polynomials_simplex_bernstein_basis_calculus import (
integrate_bernstein_polynomial_unit_simplex)
from polynomials_on_simplices.geometry.mesh.simplicial_complex import opposite_sub_simplex, simplex_vertices
from polynomials_on_simplices.geometry.primitives.simplex import unit
from polynomials_on_simplices.polynomial.polynomials_base import get_dimension, polynomials_equal
from polynomials_on_simplices.polynomial.polynomials_monomial_basis import unique_identifier_monomial_basis
from polynomials_on_simplices.polynomial.polynomials_unit_simplex_bernstein_basis import (
PolynomialBernstein, bernstein_basis_fn, degree_elevated_bernstein_basis_fn, dual_bernstein_basis_fn,
dual_bernstein_basis_polynomial, dual_vector_valued_bernstein_basis, get_associated_sub_simplex,
unique_identifier_bernstein_basis, unit_polynomial, vector_valued_bernstein_basis, zero_polynomial)
from polynomials_on_simplices.probability_theory.uniform_sampling import nsimplex_sampling
def test_call():
# Test calling a scalar valued univariate polynomial
p = PolynomialBernstein([1, 1, 1], 2, 1)
value = p(0.5)
expected_value = 1
assert value == expected_value
# Test calling a vector valued univariate polynomial
p = PolynomialBernstein([[1, 1], [1, 1], [1, 1]], 2, 1)
value = p(0.5)
    expected_value = np.array([1, 1])
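    # assumed completion by analogy with the scalar check above
    assert np.array_equal(value, expected_value)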
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CMAP
import matplotlib.animation as MOV
from matplotlib import ticker
from scipy.interpolate import griddata
import datetime as DT
import time
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as CTLG
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import lookup_operations as LKP
import ipdb as PDB
## Input/output parameters
telescope_id = 'custom'
element_size = 0.74
element_shape = 'delta'
phased_array = True
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
ground_plane = 0.3 # height of antenna element above ground plane
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
obs_mode = 'custom'
avg_drifts = False
beam_switch = False
snapshot_type_str = ''
if avg_drifts:
snapshot_type_str = 'drift_averaged_'
if beam_switch:
snapshot_type_str = 'beam_switches_'
n_sky_sectors = 4
sky_sector = 3 # if None, use all sky sector. Accepted values are None, 0, 1, 2, or 3
if sky_sector is None:
sky_sector_str = '_all_sky_'
n_sky_sectors = 1
sky_sector = 0
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(sky_sector)
Tsys = 90.0 # System temperature in K
freq = 185.0 * 1e6 # foreground center frequency in Hz
freq_resolution = 80e3 # in Hz
coarse_channel_resolution = 1.28e6 # in Hz
bpass_shape = 'bnw'
f_pad = 1.0
oversampling_factor = 1.0 + f_pad
n_channels = 384
nchan = n_channels
max_abs_delay = 2.5 # in micro seconds
window = n_channels * DSP.windowing(n_channels, shape=bpass_shape, pad_width=0, centering=True, area_normalize=True)
nside = 64
use_GSM = False
use_DSM = True
use_CSM = False
use_NVSS = False
use_SUMSS = False
use_MSS = False
use_GLEAM = False
use_PS = False
if use_GSM:
fg_str = 'asm'
elif use_DSM:
fg_str = 'dsm'
elif use_CSM:
fg_str = 'csm'
elif use_SUMSS:
fg_str = 'sumss'
elif use_GLEAM:
fg_str = 'gleam'
elif use_PS:
fg_str = 'point'
elif use_NVSS:
fg_str = 'nvss'
else:
fg_str = 'other'
antenna_file = '/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt'
ant_locs = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
bl, bl_id = RI.baseline_generator(ant_locs[:,1:], ant_id=ant_locs[:,0].astype(int).astype(str), auto=False, conjugate=False)
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
bl_id = bl_id[sortind]
n_bins_baseline_orientation = 4
n_bl_chunks = 32
baseline_chunk_size = 64
neg_bl_orientation_ind = bl_orientation < 0.0
# neg_bl_orientation_ind = NP.logical_or(bl_orientation < -0.5*180.0/n_bins_baseline_orientation, bl_orientation > 180.0 - 0.5*180.0/n_bins_baseline_orientation)
bl[neg_bl_orientation_ind,:] = -1.0 * bl[neg_bl_orientation_ind,:]
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
total_baselines = bl_length.size
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
bl_chunk = range(len(baseline_bin_indices))
bl_chunk = bl_chunk[:n_bl_chunks]
bl = bl[:baseline_bin_indices[n_bl_chunks],:]
bl_length = bl_length[:baseline_bin_indices[n_bl_chunks]]
bl_orientation = bl_orientation[:baseline_bin_indices[n_bl_chunks]]
bl_id = bl_id[:baseline_bin_indices[n_bl_chunks]]
neg_bl_orientation_ind = bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
## Plot distribution of baseline lengths and distributions
bl_length_binsize = 20.0
bl_length_bins = NP.linspace(0.0, NP.ceil(bl_length.max()/bl_length_binsize) * bl_length_binsize, NP.ceil(bl_length.max()/bl_length_binsize)+1)
bl_orientation_binsize=180.0/(2*n_bins_baseline_orientation)
bl_orientation_bins = NP.linspace(bl_orientation.min(), bl_orientation.max(), 2*n_bins_baseline_orientation+1)
labels = []
labels += ['B{0:0d}'.format(i+1) for i in range(bl.shape[0])]
roifile = '/data3/t_nithyanandan/project_MWA/roi_info_'+telescope_str+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
roi = RI.ROI_parameters(init_file=roifile)
telescope = roi.telescope
# telescope = {}
# telescope['id'] = telescope_id
# telescope['shape'] = element_shape
# telescope['size'] = element_size
# telescope['orientation'] = element_orientation
# telescope['ocoords'] = element_ocoords
# telescope['groundplane'] = ground_plane
fig = PLT.figure(figsize=(6,6))
ax1 = fig.add_subplot(211)
n, bins, patches = ax1.hist(bl_length, bins=bl_length_bins, histtype='step', lw=2, color='black')
ax1.xaxis.tick_top()
ax1.xaxis.set_label_position('top')
ax1.set_xlabel('Baseline Length [m]', fontsize=18, weight='medium')
ax1.set_ylabel('Number in bin', fontsize=18, weight='medium')
ax1.tick_params(which='major', length=18, labelsize=12)
ax1.tick_params(which='minor', length=12, labelsize=12)
for axis in ['top','bottom','left','right']:
ax1.spines[axis].set_linewidth(2)
xticklabels = PLT.getp(ax1, 'xticklabels')
yticklabels = PLT.getp(ax1, 'yticklabels')
PLT.setp(xticklabels, fontsize=15, weight='medium')
PLT.setp(yticklabels, fontsize=15, weight='medium')
ax2 = fig.add_subplot(212)
n, bins, patches = ax2.hist(bl_orientation, bins=bl_orientation_bins, histtype='step', lw=2, color='black')
ax2.set_xlabel('Baseline Orientation [deg]', fontsize=18, weight='medium')
ax2.set_ylabel('Number in bin', fontsize=18, weight='medium')
ax2.tick_params(which='major', length=18, labelsize=12)
ax2.tick_params(which='minor', length=12, labelsize=12)
for axis in ['top','bottom','left','right']:
ax2.spines[axis].set_linewidth(2)
xticklabels = PLT.getp(ax2, 'xticklabels')
yticklabels = PLT.getp(ax2, 'yticklabels')
PLT.setp(xticklabels, fontsize=15, weight='medium')
PLT.setp(yticklabels, fontsize=15, weight='medium')
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/baseline_properties.eps', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/baseline_properties.png', bbox_inches=0)
## Animation set up
backdrop_xsize = 100
fps = 0.5
interval = 100
animation_format = 'MP4'
if animation_format == 'MP4':
anim_format = '.mp4'
else:
anim_format = 'gif'
animation_file = None
if animation_file is None:
animation_file = '/data3/t_nithyanandan/project_MWA/animations/multi_baseline_noiseless_visibilities_'+snapshot_type_str+obs_mode+'_'+'{0:0d}'.format(n_bl_chunks*baseline_chunk_size)+'_baselines_{0:0d}_orientations_'.format(n_bins_baseline_orientation)+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_{0:0d}_sectors'.format(n_bins_baseline_orientation)
animation2_file = None
if animation2_file is None:
animation2_file = '/data3/t_nithyanandan/project_MWA/animations/delay_emission_map_'+snapshot_type_str+obs_mode+'_'+'{0:0d}'.format(n_bl_chunks*baseline_chunk_size)+'_baselines_{0:0d}_orientations_'.format(n_bins_baseline_orientation)+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_{0:0d}_sectors'.format(n_bins_baseline_orientation)
lags = None
skyvis_lag = None
vis_lag = None
# # progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_bl_chunks).start()
# # for i in range(0, n_bl_chunks):
# # infile = '/data3/t_nithyanandan/project_MWA/multi_baseline_visibilities_'+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[i]],bl_length[min(baseline_bin_indices[i]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
# # hdulist = fits.open(infile+'.fits')
# # # extnames = [hdu.header['EXTNAME'] for hdu in hdulist]
# # if i == 0:
# # lags = hdulist['SPECTRAL INFO'].data.field('lag')
# # vis_lag = hdulist['real_lag_visibility'].data + 1j * hdulist['imag_lag_visibility'].data
# # skyvis_lag = hdulist['real_lag_sky_visibility'].data + 1j * hdulist['imag_lag_sky_visibility'].data
# # latitude = hdulist[0].header['latitude']
# # pointing_coords = hdulist[0].header['pointing_coords']
# # pointings_table = hdulist['POINTING INFO'].data
# # lst = pointings_table['LST']
# # n_snaps = lst.size
# # if pointing_coords == 'altaz':
# # pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
# # pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
# # pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# # elif pointing_coords == 'radec':
# # pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# # pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
# # pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# # pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# # elif pointing_coords == 'hadec':
# # pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# # pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
# # pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# # pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# # else:
# # vis_lag = NP.vstack((vis_lag, hdulist['real_lag_visibility'].data + 1j * hdulist['imag_lag_visibility'].data))
# # skyvis_lag = NP.vstack((skyvis_lag, hdulist['real_lag_sky_visibility'].data + 1j * hdulist['imag_lag_sky_visibility'].data))
# # hdulist.close()
# # progress.update(i+1)
# # progress.finish()
# progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_bl_chunks).start()
# for i in range(0, n_bl_chunks):
# infile = '/data3/t_nithyanandan/project_MWA/multi_baseline_visibilities_'+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[i]],bl_length[min(baseline_bin_indices[i]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
# if i == 0:
# ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
# hdulist = fits.open(infile+'.fits')
# latitude = hdulist[0].header['latitude']
# pointing_coords = hdulist[0].header['pointing_coords']
# pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
# lst = pointings_table['LST']
# n_snaps = lst.size
# hdulist.close()
# if pointing_coords == 'altaz':
# pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
# pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
# pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# elif pointing_coords == 'radec':
# pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
# pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# elif pointing_coords == 'hadec':
# pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
# pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# else:
# ia_next = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
# ia.concatenate(ia_next, axis=0)
# progress.update(i+1)
# progress.finish()
infile = '/data3/t_nithyanandan/project_MWA/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)
ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
hdulist = fits.open(infile+'.fits')
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
lst = pointings_table['LST']
n_snaps = lst.size
hdulist.close()
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# pc = NP.asarray([90.0, 90.0]).reshape(1,-1)
# pc = NP.asarray([266.416837, -29.00781]).reshape(1,-1)
pc = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
pc_coords = 'dircos'
ia.phase_centering(phase_center=pc, phase_center_coords=pc_coords)
#################################################################################
# Find any negative orientation baselines and conjugate those visibilities
simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
simdata_neg_bl_orientation_ind = simdata_bl_orientation < 0.0
simdata_bl_orientation[simdata_neg_bl_orientation_ind] += 180.0
ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
# ia.baseline_orientations[simdata_neg_bl_orientation_ind] = 180.0 + ia.baseline_orientations[simdata_neg_bl_orientation_ind]
ia.vis_freq[simdata_neg_bl_orientation_ind,:,:] = ia.vis_freq[simdata_neg_bl_orientation_ind,:,:].conj()
ia.skyvis_freq[simdata_neg_bl_orientation_ind,:,:] = ia.skyvis_freq[simdata_neg_bl_orientation_ind,:,:].conj()
ia.vis_noise_freq[simdata_neg_bl_orientation_ind,:,:] = ia.vis_noise_freq[simdata_neg_bl_orientation_ind,:,:].conj()
ia.delay_transform(f_pad, freq_wts=window) # delay transform re-estimate
lags = ia.lags
vis_lag = ia.vis_lag
skyvis_lag = ia.skyvis_lag
if max_abs_delay is not None:
small_delays_ind = NP.abs(lags) <= max_abs_delay * 1e-6
lags = lags[small_delays_ind]
vis_lag = vis_lag[:,small_delays_ind,:]
skyvis_lag = skyvis_lag[:,small_delays_ind,:]
## Delay limits re-estimation
delay_matrix = DLY.delay_envelope(ia.baselines, pointings_dircos, units='mks')
fig = PLT.figure(figsize=(6,8))
ax1 = fig.add_subplot(211)
# ax1.set_xlabel('Baseline Length [m]', fontsize=18)
# ax1.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# dspec1 = ax1.pcolorfast(bl_length, 1e6*lags, NP.abs(skyvis_lag[:-1,:-1,0].T), norm=PLTC.LogNorm(vmin=NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))))
# ax1.set_xlim(bl_length[0], bl_length[-1])
# ax1.set_ylim(1e6*lags[0], 1e6*lags[-1])
ax1.set_xlabel('Baseline Index', fontsize=18)
ax1.set_ylabel(r'lag [$\mu$s]', fontsize=18)
dspec1 = ax1.imshow(NP.abs(skyvis_lag[:,:,0].T), origin='lower', extent=(0, skyvis_lag.shape[0]-1, NP.amin(lags*1e6), NP.amax(lags*1e6)), norm=PLTC.LogNorm(NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))), interpolation=None)
ax1.set_aspect('auto')
ax2 = fig.add_subplot(212)
# ax2.set_xlabel('Baseline Length [m]', fontsize=18)
# ax2.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# dspec2 = ax2.pcolorfast(bl_length, 1e6*lags, NP.abs(skyvis_lag[:-1,:-1,1].T), norm=PLTC.LogNorm(vmin=NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))))
# ax2.set_xlim(bl_length[0], bl_length[-1])
# ax2.set_ylim(1e6*lags[0], 1e6*lags[-1])
ax2.set_xlabel('Baseline Index', fontsize=18)
ax2.set_ylabel(r'lag [$\mu$s]', fontsize=18)
dspec2 = ax2.imshow(NP.abs(skyvis_lag[:,:,1].T), origin='lower', extent=(0, skyvis_lag.shape[0]-1, NP.amin(lags*1e6), NP.amax(lags*1e6)), norm=PLTC.LogNorm(NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))), interpolation=None)
import gym
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class JapanMaze(object):
def __init__(self,radius=0.5,seed=0):
np.random.seed(seed=seed)
self.action_limit = 0.1
self.ini_posi = np.array([-0.9,-0.9])
self.ini_cov = np.array([[0.005,0.],[0.,0.005]])
self.whereami = copy.deepcopy(self.ini_posi)
self.goal = np.array([0.9,0.9])
self.reward_f = lambda y:np.exp(-(np.linalg.norm(y-self.goal)**2)/2)
self.center = np.array([0.0,0.0])
self.radius = radius
self.timelimit =40
self.N = 30 # Collision determination resolution
high = np.ones(2)*1
self.observation_space = gym.spaces.Box(low=-np.ones(2)*1, high=np.ones(2)*1,dtype=np.float32)
self.action_space = gym.spaces.Box(low=-np.ones(2)*0.1, high=np.ones(2)*0.1,dtype=np.float32)
def reset(self):
self.timestep = 0
self.whereami = np.random.multivariate_normal(self.ini_posi, self.ini_cov)
return self.whereami
def isvalid(self,wai):
        return np.linalg.norm(self.center-wai) >= self.radius
def step_near_circle(self,ac):
wai = copy.deepcopy(self.whereami)
for i in range(1,self.N+1):
ratio = i/self.N
n_wai = self.whereami+ac*ratio
            if not self.isvalid(n_wai):  # entered the circle, so return the previous position
return wai
else:
wai = copy.deepcopy(n_wai)
return wai
def step(self,ac):
self.timestep +=1
ac = copy.deepcopy(np.array([max(-self.action_limit,min(ac[0],self.action_limit)),
max(-self.action_limit,min(ac[1],self.action_limit))]))
ac += np.random.normal(0.0, 0.005, 2)
nwai = self.whereami+ ac
nwai[0] = min(max(-1.,nwai[0]),1.)
nwai[1] = min(max(-1.,nwai[1]),1.)
if nwai[0] < 0.5 and nwai[0] > -0.5 and nwai[1] < 0.5 and nwai[1] > -0.5:
self.whereami = self.step_near_circle(ac)
else:
self.whereami = nwai
rew = self.reward_f(self.whereami)
return self.whereami, rew, self.timestep>=self.timelimit,{}
def render(self):
fig = plt.figure(figsize=(5,5))
ax = plt.axes()
ax.plot([-1,-1,1,1,-1],[-1,1,1,-1,-1],c='black')
c = patches.Circle(xy=(self.center[0], self.center[1]), radius=self.radius, fc='r', ec='r')
ax.add_patch(c)
ax.scatter(self.whereami[0],self.whereami[1],c='black')
ax.scatter(self.goal[0],self.goal[1],marker='x',c='black')
def calc_sum_rews(self,X):
return sum([self.reward_f(x[:2]) for x in X])
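    # Minimal rollout sketch (assumes a random policy; the environment
    # follows the classic gym reset/step API defined above):
    #   env = JapanMaze()
    #   obs = env.reset()
    #   done = False
    #   while not done:
    #       obs, rew, done, _ = env.step(env.action_space.sample())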
def vis_gpr(self,pilco,save_name=False):
posi = [[-0.7,-0.7],[0.7,-0.7],[-0.7,0.7],[0.7,0.7]]
th = lambda t:[np.cos(t)*0.1,np.sin(t)*0.1]
vec = [th(np.pi/2),th(np.pi/2 + 2/3*np.pi),th(np.pi/2 + 4/3*np.pi)]
posi_vec = np.array([p+v for p in posi for v in vec])
xmean,xvar = pilco.mgpr.models[0].predict_y(posi_vec)
ymean,yvar = pilco.mgpr.models[1].predict_y(posi_vec)
        means = np.hstack((xmean,ymean))
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from subprocess import Popen, PIPE
import cudf
import cupy as cp
import os
import time
import tabix
import numpy as np
import pandas as pd
from numba import cuda
from atacworks.dl4atac.models.models import DenoisingResNet
from atacworks.dl4atac.models.model_utils import load_model
import torch
def count_fragments(fragment_file):
"""
Counts number of fragments per barcode in fragment file.
Parameters
----------
fragment_file: path to gzipped fragment file
Returns
-------
barcode_counts: pandas DF with number of fragments per barcode.
"""
fragment_barcodes = pd.read_csv(fragment_file, compression='gzip', sep='\t', header=None, usecols=[3])
barcode_counts = fragment_barcodes.iloc[:,0].value_counts().reset_index()
barcode_counts.columns = ['cell', 'fragments']
return barcode_counts
def query_fragments(fragment_file, chrom, start, end):
"""
    Queries fragments overlapping a given genomic region.
Parameters
----------
fragment_file: path to fragment file
chrom: chromosome to query
start: start of query region
end: end of query region
Returns
-------
records: fragments in given region.
"""
tb = tabix.open(fragment_file)
results = tb.querys("%s:%d-%d" % (chrom, start, end))
records = []
for record in results:
records.append(record)
return records
def tabix_query(filename, chrom, start, end):
"""
    Calls tabix and generates an array of strings for each line it returns.
Parameters
----------
filename: path to fragment file
chrom: chromosome to query
start: start of query region
end: end of query region
Returns
-------
records: fragments in given region.
"""
query = '{}:{}-{}'.format(chrom, start, end)
process = Popen(['tabix', '-f', filename, query], stdout=PIPE)
records = []
for line in process.stdout:
record = line.decode('utf-8').strip().split('\t')
records.append(record)
return records
def read_fragments(chrom, start, end, fragment_file):
"""
Creates a DF from the output of tabix_query.
Parameters
----------
filename: path to fragment file
chrom: chromosome to query
start: start of query region
end: end of query region
Returns
-------
fragments: DF containing fragments in given region.
"""
fragments = cudf.DataFrame(
data=tabix_query(fragment_file, chrom, start, end),
columns=['chrom', 'start', 'end', 'cell', 'duplicate'])
fragments.drop('duplicate', inplace=True, axis=1)
fragments['row_num'] = fragments.index
fragments = fragments.astype({"start": np.int32, "end": np.int32})
fragments['len'] = fragments['end'] - fragments['start']
return fragments
@cuda.jit
def expand_fragments(start, end, index, end_index,
interval_start, interval_end, interval_index, step):
"""
Expands fragments to high resolution intervals.
Parameters
----------
start: start of fragment
end: end of fragment
index: index of fragment
end_index: index of fragment end
interval_start: array to fill start of each interval
interval_end: array to fill end of each interval
interval_index: array to fill index of each interval
step: step size in bp
"""
i = cuda.grid(1)
# Starting position in the target frame
first_index = end_index[i] - (end[i] - start[i])
chrom_start = start[i]
for j in range(first_index, end_index[i], step):
interval_start[j] = chrom_start
chrom_start = chrom_start + 1
interval_end[j] = chrom_start
interval_index[j] = index[i]
def get_coverages(start, end, fragments):
"""
Calculates per-bp coverage per cluster.
Parameters
----------
start: start of selected region
end: end of selected region
fragments: DF containing fragments for selected region
Returns:
--------
coverage_array: numpy array containing coverage for each cluster
"""
# Copy fragments DF
fragments_copy = fragments.copy()
# Take cumulative sum of fragment lengths
cum_sum = fragments_copy['len'].cumsum()
expanded_size = cum_sum[len(fragments_copy) - 1].tolist()
# Create expanded fragment dataframe
expanded_fragments = cudf.DataFrame()
start_arr = cp.zeros(expanded_size, dtype=cp.int32)
end_arr = cp.zeros(expanded_size, dtype=cp.int32)
rownum_arr = cp.zeros(expanded_size, dtype=cp.int32)
# Expand all fragments to single-bp resolution
expand_fragments.forall(fragments_copy.shape[0], 1)(
fragments_copy['start'],
fragments_copy['end'],
fragments_copy['row_num'],
cum_sum,
start_arr,
end_arr,
rownum_arr,
1)
expanded_fragments['start'] = start_arr
expanded_fragments['end'] = end_arr
expanded_fragments['row_num'] = rownum_arr
fragments_copy.drop(['start', 'end'], inplace=True, axis=1)
expanded_fragments = expanded_fragments.merge(fragments_copy, on='row_num')
# Count number of fragments at each position
coverage_df = expanded_fragments.groupby(['chrom', 'start', 'end', 'cluster'], as_index=False).count()
# List all clusters
clusters = sorted(np.unique(fragments_copy['cluster'].to_array()))
num_clusters = len(clusters)
# Create empty array
coverage_array = np.zeros(shape=(num_clusters, (end - start)))
# Iterate over clusters to add coverage values
for (i, cluster) in enumerate(clusters):
cluster_df = coverage_df.loc[coverage_df['cluster'] == cluster]
coords = cluster_df['start'] - start
values = cluster_df['row_num']
ind = (coords >= 0) & (coords < (end-start))
coords = coords[ind].values.get()
values = values[ind].values.get()
coverage_array[i][coords] = values
return coverage_array
def load_atacworks_model(weights_path, gpu, interval_size=50000):
"""
Loads pre-trained AtacWorks resnet model.
Parameters
----------
weights_path: path to hdf5 file containing model weights.
gpu: Index of GPU on which to load model.
interval_size: interval size parameter for resnet model
Returns:
--------
model: AtacWorks resnet model to be used for denoising and peak calling.
"""
model = DenoisingResNet(interval_size=interval_size, kernel_size=51, kernel_size_class=51)
model = load_model(model, weights_path=weights_path, rank=0)
model = model.cuda(gpu)
return model
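# Usage sketch (hypothetical weights path; assumes GPU 0 is available):
#   model = load_atacworks_model('atacworks.weights.pth', gpu=0)
#   model.eval()  # switch to inference mode before denoising coverage tracks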
def reshape_with_padding(coverage, interval_size, pad):
"""
Reshapes array of coverage values for AtacWorks model.
Parameters
----------
coverage: array of coverage values per cluster.
interval_size: interval_size parameter for AtacWorks model.
pad: pad parameter for AtacWorks model
Returns:
--------
reshaped coverage: reshaped array of coverage values.
"""
if(len(coverage.shape)==1):
coverage = coverage.reshape((1, coverage.shape[0]))
# Calculate dimensions of empty array
num_clusters = int(coverage.shape[0])
n_intervals = int((coverage.shape[1] - 2*pad) / interval_size)
padded_interval_size = int(interval_size + 2*pad)
# Create empty array to fill in reshaped coverage values
    reshaped_coverage = np.zeros(shape=(num_clusters*n_intervals, padded_interval_size))
import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree, connected_components
def euclidean_mst(X, neighbors_estimator, verbose=2):
n_neighbors = min(2, X.shape[0])
while True:
# make sure we have a connected minimum spanning tree.
# otherwise we need to consider more neighbors
n_neighbors = 2 * n_neighbors
if verbose > 1:
print("Trying to build mst with %d neighbors." % n_neighbors)
distances = neighbors_estimator.kneighbors_graph(
X, n_neighbors=n_neighbors, mode='distance')
n_components, component_indicators =\
connected_components(distances + distances.T)
if len(np.unique(component_indicators)) > 1:
continue
distances.sort_indices()
forest = minimum_spanning_tree(distances)
_, inds = connected_components(forest + forest.T)
        assert(len(np.unique(inds)) == 1)
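        # assumed continuation: the spanning tree is connected, so return it
        return forest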
import numpy as np
import os
from scipy.io import loadmat
from scipy.special import kv, iv
from numpy import pi, real, imag, exp, sqrt, sum, sin, cos
# see <NAME>., and <NAME>. "Stokes flow due to a Stokeslet in a pipe."
# Journal of Fluid Mechanics 86.04 (1978): 727-744.
# class containing functions for detailed expression
# noinspection PyTypeChecker
class detail:
def __init__(self, threshold, b):
self._threshold = threshold
self._b = b
self._k = np.zeros([0])
self._n = np.zeros([0])
self._xn = np.zeros([0])
self._yn = np.zeros([0])
self._DmyD_xn = np.zeros([0])
self._DmyD_yn = np.zeros([0])
self._xn_k0 = np.zeros([0])
self._yn_k0 = np.zeros([0])
self._DmyD_xn_k0 = np.zeros([0])
self._DmyD_yn_k0 = np.zeros([0])
self._psi_xn1 = np.zeros([0])
self._psi_xn2 = np.zeros([0])
self._psi_xn3 = np.zeros([0])
self._pi_xn1 = np.zeros([0])
self._pi_xn2 = np.zeros([0])
        self._pi_xn3 = np.zeros([0])
# Copyright (C) 2022 <NAME>, <NAME>, <NAME>
# Code -- Scaling up Ranking under Constraints for Live Recommendations by Replacing Optimization with Prediction
# https://github.com/computationalmarketing/scalable_ranking_under_constraints/
# Code running ranking of 50 news articles
from core_functions_unbalanced import *
import numpy as np
import pandas as pd
import cvxopt
from scipy.optimize import linear_sum_assignment
import time
import cvxpy as cp
from multiprocessing import Pool
from tqdm import tqdm
from matplotlib import pyplot as plt
from scipy import sparse
import matplotlib.tri as tri
from sklearn.linear_model import BayesianRidge
from sklearn.neighbors import KNeighborsRegressor
import math
import json
import seaborn as sns
sns.set_theme(style="whitegrid")
import os
if not os.path.exists('../results'):
os.makedirs('../results')
PATH_RESULTS = '../results/yow-dataset-50'
if not os.path.exists(PATH_RESULTS):
os.makedirs(PATH_RESULTS)
# load data
PATH_DATA = '../data/yow-dataset'
ratings = pd.read_csv(PATH_DATA + '/generated_data.csv')
ratings.shape
# very important to sort
# we can use the special structure of the problem to speed up computation
ratings = ratings.sort_values('relevant', ascending=False)
# dummy code clusters
ratings['userClust1'] = 1*(ratings['userClust']==1)
ratings['userClust2'] = 1*(ratings['userClust']==2)
# number of unique movies
ratings['DOC_ID'].unique().shape
def extract_data(user, top_k, sample_size):
# data extraction function
# user is user id in range(1000)
    # sample_size - if set, keep only the user's top sample_size items
# top_k - across what items to measure the utility/exposure
# user data
ratings_u = ratings[ratings['user_id']==user]
# for each user, optimize only across top sample_size items
if sample_size:
ratings_u = ratings_u.iloc[:sample_size]
n = ratings_u.shape[0]
# utilities and constraints
exposure = np.array([[1.0/np.log2(i+1.0) for i in range(1,top_k+1)]])#np.array([[1.0 for i in range(top_k)]])#
# taking dot product with identical discounting
    U = np.dot(ratings_u['relevant'].values[:,np.newaxis], exposure)
import numpy as np
from baselines import util
import os
import copy
import nltk
#import crf
import scipy.special
import sklearn
class HMM:
"""
Hidden Markov Model
"""
def __init__(self, n, m):
"""
fix n, m
:param n: number of states
:param m: number of observations
"""
self.n = n
self.m = m
self.t = np.zeros((n, n))
self.e = np.zeros((n, m))
self.start = np.asarray([1.0 / n] * n)
def pr_obs(self, i, list_features, t=None):
"""
:param i: state
:param list_features:
:param t: time, not used here
:return: probability of observing the features in state i
"""
res = 1
for f in list_features:
res *= self.e[i, f]
return res
def decode(self, a, include_crowd_obs=False):
"""
Viterbi decoding
:param a: seq of observations, each observation is a list of features
:return:
"""
l = len(a)
if l == 0:
return []
# c[t][i] = prob of best path time t, at state i
c = np.zeros((l, self.n))
c[0] = np.copy(self.start) # * self.e[:, a[0]]
# print self.n, c.shape
for i in range(self.n):
c[0][i] *= self.pr_obs(i, a[0])
# b[t][i] = backpointer
b = np.zeros((l, self.n))
for t in range(1, l, 1): # time
ob = a[t]
for i in range(self.n): # current state
for j in range(self.n): # previous state
# todo: change to log scale
p = c[t - 1][j] * self.t[j, i] * self.pr_obs(i, ob)
if include_crowd_obs:
p *= self.pr_crowd_labs(t, i, self.current_list_cl)
# print t, i, j, p
if p > c[t][i]:
c[t][i] = p
b[t][i] = j
# normalise otherwise p ends up as zeros with long sequences
c_t_total = 0
for i in range(self.n):
c_t_total += c[t][i]
for i in range(self.n):
c[t][i] /= c_t_total
res = np.zeros((l,))
# trace
p = 0
for i in range(self.n):
if c[l - 1][i] > p:
p = c[l - 1][i]
res[l - 1] = i
seq_prob = p
for t in range(l - 2, -1, -1):
res[t] = b[int(t + 1), int(res[t + 1])]
# print c
# print b
return res, seq_prob
def learn(self, sentences, smooth=0.001):
"""
learn parameters from labeled data
:param sentences: list of sentence, which is list of instance
:return:
"""
# counting
self.t = smooth * np.ones((self.n, self.n))
self.e = smooth * np.ones((self.n, self.m))
self.start = smooth * np.ones((self.n,))
for sentence in sentences:
if len(sentence) > 0:
i = sentence[0]
self.start[i.label] += 1
prev = -1 # previous state
for i in sentence:
state = i.label
if prev != -1:
self.t[prev][state] += 1
for f in i.features:
self.e[state][int(f)] += 1
prev = state
# save count for e
self.count_e = copy.deepcopy(self.e)
# normalizing
self.start = self.start * 1.0 / np.sum(self.start)
for i in range(self.n):
self.t[i] = self.t[i] * 1.0 / np.sum(self.t[i])
self.e[i] = self.e[i] * 1.0 / np.sum(self.e[i])
def decode_all(self, sentences):
self.res = []
self.res_prob = []
for s in sentences:
mls, mls_prob = self.decode(util.get_obs(s))
self.res.append(mls)
self.res_prob.append(mls_prob)
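    # Minimal usage sketch (hypothetical sizes/data): learn() expects
    # sentences of instances exposing .label (state id) and .features
    # (iterable of int feature ids); decode() takes observations only.
    #   hmm = HMM(n=10, m=5000)
    #   hmm.learn(train_sentences)
    #   path, prob = hmm.decode(util.get_obs(test_sentence))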
##########################################################################
##########################################################################
##########################################################################
##########################################################################
class WorkerModel:
"""
model of workers
"""
def __init__(self, n_workers = 47, n_class = 10, smooth = 0.001, ne = 9, rep = 'cv'):
"""
        :param n_workers: number of crowd workers
        :param n_class: number of label classes
        :param smooth: additive smoothing constant for the counts
        :param ne: id of the 'non-entity' (O) class
        :param rep: representation. 'cv' = per-class accuracy vector;
                    'cv2' = confusion vec of accuracy in two cases: non-entity/entity;
                    'cm_sage' = confusion matrix via SAGE additive estimation;
                    anything else = full confusion matrix
"""
self.n_workers = n_workers
self.n = n_class
self.smooth = smooth
self.ne = ne
self.rep = rep
def learn_from_pos(self, data, pos):
"""
:param data: crowd_data
:param pos: sentence posterior
:return:
"""
count = self.smooth * np.ones( (self.n_workers, self.n, self.n))
for i, sentence in enumerate(data.sentences):
for j in range(len(sentence)):
for l, w in data.get_lw(i, j):
for k in range(self.n): # 'true' label = k
count[w][k][l] += pos[i][j][k]
self.learn_from_count(count)
def learn_from_count(self, count):
"""
:return:
"""
#save the count for debug
self.count = count
if self.rep == 'cv2':
ne = self.ne
self.cv = np.zeros((self.n_workers, 2))
for w in range(self.n_workers):
self.cv[w][0] = count[w][ne][ne] * 1.0 / np.sum(count[w][ne]) # accuracy for ne class
cc = self.smooth; cw = self.smooth # count for correct and wrong for non ne classes
for i in range(self.n):
if i != ne:
cc += count[w][i][i]
cw += np.sum(count[w][i]) - count[w][i][i]
self.cv[w][1] = cc * 1.0 / (cc + cw)
elif self.rep == 'cv':
self.cv = np.zeros((self.n_workers, self.n))
for w in range(self.n_workers):
if np.mod(w, 100) == 0:
print('M-step, processing worker counts %i of %i' % (w, self.n_workers))
for i in range(self.n):
self.cv[w][i] = count[w][i][i] * 1.0 / np.sum(count[w][i]) # accuracy for ne class
elif self.rep == 'cm_sage':
self.cm = np.zeros((self.n_workers, self.n, self.n))
# background dist
m = np.sum(count, axis=0)
for i in range(self.n): m[i] = m[i] * 1.0 / np.sum(m[i])
m = np.log(m)
for w in range(self.n_workers):
for i in range(self.n):
temp = additive.estimate(count[w][i], m[i])
temp = np.reshape(temp, (self.n,) )
self.cm[w][i] = np.exp(temp + m[i])
self.cm[w][i] = self.cm[w][i] * 1.0 / np.sum(self.cm[w][i])
else:
self.cm = np.zeros((self.n_workers, self.n, self.n))
for w in range(self.n_workers):
for k in range(self.n):
self.cm[w][k] = count[w][k] * 1.0 / np.sum(count[w][k])
def get_prob(self, w, true_lab, lab):
"""
:param w: worker
:param true_lab:
:param lab:
:return: probability of response lab given true label
"""
#return 1.0
if self.rep == 'cv2':
if self.ne == true_lab:
if true_lab == lab:
return self.cv[w][0]
else:
return (1 - self.cv[w][0]) / float(self.n - 1)
else:
if true_lab == lab:
return self.cv[w][1]
else:
return (1 - self.cv[w][1]) / float(self.n - 1)
elif self.rep == 'cv':
if true_lab == lab:
return self.cv[w][true_lab]
else:
return (1 - self.cv[w][true_lab]) / float(self.n - 1)
elif self.rep == 'cm_sage':
return self.cm[w][true_lab][lab]
else:
return self.cm[w][true_lab][lab]
class HMM_crowd(HMM):
def __init__(self, n, m, data, features, labels, n_workers=47, init_w=0.9, smooth=0.001, smooth_w=10, ne = 9, vb = None):
"""
:param data: util.crowd_data with crowd label
:return:
"""
HMM.__init__(self, n, m)
self.data = data
self.smooth = smooth
self.n_workers = n_workers
self.ep = 1e-300
self.features = features
self.labels = labels
self.init_w = init_w
self.ne = ne
#self.wsen = np.zeros((n_workers,))
#self.wspe = np.zeros((n_workers,))
self.wca = np.zeros((n, n_workers))
#self.ne = labels['O'] # id of 'non entity' label
self.smooth_w = smooth_w
self.n_sens = len(data.sentences)
self.vb = vb
def pr_crowd_labs(self, t, i, list_cl):
"""
:param t: time
:param i: the state
:param list_cl: list of util.crowddlab
:return: probability of observing crowd labels at state i
"""
        res = 1  # * self.prior[i]
for cl in list_cl:
wid = cl.wid
sen = cl.sen
lab = sen[t] # crowd label
# if i == self.ne:
# res *= self.wspe[wid] if lab == i else 1 - self.wspe[wid] # specificity
# else:
# res *= self.wsen[wid] if lab == i else 1 - self.wsen[wid] #
# sensitivity
#res *= self.wca[i, wid] if lab == i else 1 - self.wca[i, wid]
#res *= self.wa[wid][i][lab]
res *= self.wm.get_prob(wid, i, lab)
return res
def inference(self, sentence, list_cl, return_ab=False):
T = len(sentence) # number of timesteps
alpha = np.zeros((T, self.n)) # T * states
beta = np.zeros((T, self.n))
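        # scaled forward-backward: alpha and beta are renormalised at each
        # timestep so long sequences do not underflow; the posteriors below
        # only depend on ratios, which the per-step scaling preserves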
# alpha (forward):
for i in range(self.n):
alpha[0][i] = self.pr_obs(
i, sentence[0].features) * self.pr_crowd_labs(0, i, list_cl) * self.start[i]
for t in range(1, T, 1):
ins = sentence[t]
alpha_t_sum = 0
for i in range(self.n): # current state
alpha[t][i] = 0
for j in range(self.n): # previous state
alpha[t][i] += self.pr_obs(i, ins.features) * self.t[j][i] * alpha[t - 1][j] \
* self.pr_crowd_labs(t, i, list_cl)
alpha_t_sum += alpha[t][i]
# normalise
for i in range(self.n):
alpha[t][i] /= alpha_t_sum
# beta (backward):
for i in range(self.n):
beta[T - 1][i] = self.pr_obs(i, sentence[T - 1].features) * \
self.pr_crowd_labs(T - 1, i, list_cl)
for t in range(T - 2, -1, -1):
ins = sentence[t + 1]
beta_t_sum = 0
for i in range(self.n): # current state
beta[t][i] = 0
for j in range(self.n): # next state
beta[t][i] += self.pr_obs(j, ins.features) * self.t[i][j] * beta[t + 1][j] \
* self.pr_crowd_labs(t + 1, j, list_cl)#\
#* (self.start[i] if t == 0 else 1)
beta_t_sum += beta[t][i]
for i in range(self.n):
beta[t][i] /= beta_t_sum
if return_ab:
return (alpha, beta)
sen_posterior = []
# update counts
p = np.zeros((self.n,))
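        # state posterior gamma_t(i) is proportional to alpha[t][i] * beta[t][i]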
for t in range(T):
for i in range(self.n):
p[i] = self.ep + alpha[t][i] * beta[t][i]
            p = p * 1.0 / np.sum(p)  # normalize
#save the posterior
sen_posterior.append(p.copy())
if t == 0: # update start counts
self.count_start += p
# update prior count
#self.count_prior += p
# update emission counts
ins = sentence[t]
for i in range(self.n):
for f in ins.features:
self.count_e[i][f] += p[i]
# update crowd params counts
for i in range(self.n): # state
for cl in list_cl:
wid = cl.wid
# worker ans
lab = cl.sen[t]
# if i == self.ne:
# if lab == self.ne:
# self.count_spe[wid][0] += p[i]
# else:
# self.count_spe[wid][1] += p[i]
# else:
# if lab == self.ne:
# self.count_sen[wid][0] += p[i]
# else:
# self.count_sen[wid][1] += p[i]
#if lab == i:
# self.count_wa[i, wid][1] += p[i]
#else:
# self.count_wa[i, wid][0] += p[i]
self.count_wa[wid][i][lab] += p[i]
trans_pos = []
# update transition counts
for t in range(T - 1):
p = np.zeros((self.n, self.n))
ins = sentence[t+1]
for i in range(self.n): # state at time t
for j in range(self.n): # state at time t+1
p[i][j] = self.ep + alpha[t][i] * self.t[i][j] * self.pr_obs(j, ins.features) \
* self.pr_crowd_labs(t + 1, j, list_cl) * beta[t + 1][j]
            # normalise to get the pairwise state posterior xi_t(i, j)
            p = p * 1.0 / np.sum(p)
import pandas as pd
import os
import numpy as np
import argparse
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.lines import Line2D
from matplotlib.colors import ListedColormap, Normalize
parser = argparse.ArgumentParser('Plot Odds (Bayes) Ratio for bins')
parser.add_argument('file', type=str,
metavar='DF',
help='Location where pkl file saved')
parser.add_argument('--fig-size', type=float, default=4,
help='Figure size (inches)')
parser.add_argument('--font-size',type=float, default=20)
parser.add_argument('--no-show', action='store_false', dest='show')
parser.add_argument('--show', action='store_true', dest='show')
parser.add_argument('--dpi', type=int, default=80)
parser.add_argument('--save', action='store_true', dest='save')
parser.add_argument('--no-save', action='store_false',dest='save')
parser.add_argument('--name', type=str, default='br.pdf', help='file name for saving')
parser.add_argument('--nbins', type=int, default=100)
parser.add_argument('--yvar', type=str, nargs='+', default=['model_entropy'])
parser.add_argument('--xvar', type=str, default='rank')
parser.add_argument('--xbins', type=float, default=[], nargs='*')
parser.add_argument('--ybins', type=float, default=[], nargs='*')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--eps', type=float, default=0)
parser.add_argument('--K', type=int, default=10)
parser.add_argument('--exclude', type=int, default=[], nargs='*')
parser.set_defaults(save=False)
parser.set_defaults(show=True)
from common import labdict
args = parser.parse_args()
np.random.seed(args.seed)
sns.set_palette(palette='colorblind')
colors = sns.color_palette()
cmap = ListedColormap(colors)
fsz = args.font_size
figsz = (args.fig_size, args.fig_size)
plt.rc('font', size=fsz)
plt.rc('axes', titlesize=fsz)
plt.rc('axes', labelsize=fsz)
plt.rc('xtick', labelsize=fsz)
plt.rc('ytick', labelsize=fsz)
plt.rc('legend', fontsize=.66*fsz)
plt.rc('figure', titlesize=fsz)
dpi = args.dpi
show=args.show
plt.close('all')
fig, ax = plt.subplots(1, figsize=figsz)
print('X: %s, Y: %s'%(args.xvar, args.yvar))
df = pd.read_pickle(args.file)
df = df.drop(args.exclude)
Nsamples = len(df)
K = args.K
N = len(df)
Ix = np.random.permutation(N)
X_ = df[args.xvar]
for yvar in args.yvar:
Y_ = df[yvar]
#n = N//K
#ix = Ix[n*i:n*(i+1)]
#X = np.delete(X_.to_numpy(), ix)
#Y = np.delete(Y_.to_numpy(), ix)
    X = X_.to_numpy()[Ix]
    Y = Y_.to_numpy()[Ix]
Nbins = args.nbins
if len(args.ybins)==0:
Yc, Ybins = pd.qcut(Y,Nbins,retbins=True,duplicates='drop')
else:
Yc, Ybins = pd.cut(Y,args.ybins,retbins=True, duplicates='drop', right=False)
if len(args.xbins)==0:
Xc, Xbins = pd.qcut(X,Nbins,retbins=True,duplicates='drop')
else:
Xc, Xbins = pd.cut(X,args.xbins,retbins=True,duplicates='drop', right=False)
#Yvc = Yc.value_counts(sort=False)
#Xvc = Xc.value_counts(sort=False)
H, xe, ye = np.histogram2d(X, Y, bins=[Xbins, Ybins])
P = H/np.sum(H)
Ptop1 = df['top1'].sum()/len(df)
Ptop5 = df['top5'].sum()/len(df)
Otop1 = Ptop1/(1-Ptop1)
Otop5 = Ptop5/(1-Ptop5)
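    # convert top-1/top-5 accuracy to odds so the per-bin odds ratios can be
    # compared against these marginal odds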
Py = P.sum(axis=0)
Ptop1xbins = P[Xbins[:-1]==0,:].reshape(-1)/Py
ix = np.arange(len(Ptop1xbins))
ix1 = Ptop1xbins==1
try:
lb = np.max(ix[ix1])+1
except ValueError as e:
lb = 0
    Ptop1xbins[0:(lb+1)] = np.sum(Ptop1xbins[0:(lb+1)])
import numpy as np
class Real():
def __init__(self, value: float = 0):
self.value = np.array([value], dtype=float)
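        # store scalars as 1-element arrays so arithmetic broadcasts cleanly
        # against matrix-valued operands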
def __add__(self, rhs):
out = Real()
if isinstance(rhs, Real):
out.value = self.value + rhs.value
else:
out.value = self.value + rhs
return out
def __radd__(self, lhs):
out = Real()
if isinstance(lhs, Real):
            out.value = lhs.value + self.value
else:
out.value = lhs + self.value
return out
def __sub__(self, rhs):
out = Real()
if isinstance(rhs, Real):
out.value = self.value - rhs.value
else:
out.value = self.value - rhs
return out
def __rsub__(self, lhs):
out = Real()
if isinstance(lhs, Real):
out.value = lhs.value - self.value
else:
out.value = lhs - self.value
return out
def __mul__(self, rhs):
out = Real()
if isinstance(rhs, (Real, Complex, RealMatrix, ComplexMatrix)):
out.value = self.value*rhs.value
elif isinstance(rhs, (float, int, complex)):
out.value = self.value*rhs
return out
def __rmul__(self, lhs):
out = Real()
if isinstance(lhs, (Real, Complex, RealMatrix, ComplexMatrix)):
out.value = lhs.value*self.value
elif isinstance(lhs, (float, int, complex)):
out.value = lhs*self.value
return out
def __pow__(self, n):
out = Real()
if isinstance(n, (float, int)):
out.value = self.value**n
else:
out.value = self.value**n.value
return out
class Complex(Real):
def __init__(self, value: complex = 1j):
super().__init__()
self.value = np.array([value], dtype=complex)
def re(self):
out = Real()
out.value = np.real(self.value)
return out
def im(self):
out = Real()
out.value = np.imag(self.value)
return out
def conj(self):
out = Complex()
out.value = np.conj(self.value)
return out
class RealMatrix():
def __init__(self, N: int = None, value: np.ndarray = None):
if N != None:
self.N = N
self.value = np.zeros((N, N), dtype=float)
else:
self.N = len(value)
self.value = value
def transpose(self):
out = RealMatrix(self.N)
out.value = np.transpose(self.value)
return out
def trace(self):
tr = np.trace(self.value)
return Real(tr)
def det(self):
        d = np.linalg.det(self.value)
        return Real(d)
import numpy as np
import pickle
import os
from copy import deepcopy
from scipy.special import digamma
from pynverse import inversefunc
from utils import bql_f_inv, \
normal_gamma, \
solve_tabular_continuing_PI
# ============================================================================
# General Tabular agent class
# ============================================================================
class TabularAgent:
def __init__(self, gamma):
# Discount factor
self.gamma = gamma
def add_observations(self, s, a, r, s_):
""" Add observations to log. """
s, a, r, s_ = [np.array([data]) for data in [s, a, r, s_]]
if hasattr(self, 'train_s'):
self.train_s = np.concatenate([self.train_s, s], axis=0)
self.train_a = np.concatenate([self.train_a, a], axis=0)
self.train_s_ = np.concatenate([self.train_s_, s_], axis=0)
self.train_r = np.concatenate([self.train_r, r], axis=0)
else:
self.train_s = s
self.train_a = a
self.train_s_ = s_
self.train_r = r
def take_action(self, s, t, policy_params):
raise NotImplementedError
def update_after_step(self, t):
pass
def observe(self, transition):
pass
def save_copy(self, location, name):
""" Save a copy of the agent. """
fhandle = open(location + '/' + name, 'wb')
pickle.dump(self, fhandle)
fhandle.close()
# ============================================================================
# QLearningAgent class
# ============================================================================
class QLearningAgent(TabularAgent):
def __init__(self, params):
# Set QLearning agent parameters
self.gamma = params['gamma']
self.lr = params['lr']
self.sa_list = params['sa_list']
self.Q0 = params['Q0']
self.dither_mode = params['dither_mode']
self.dither_param = params['dither_param']
self.anneal_timescale = params['anneal_timescale']
# Array for storing previous Q posterior
self.Qlog = []
super(QLearningAgent, self).__init__(self.gamma)
# Set initial Q values to Q0, and create set of valid actions
self.Q = {}
self.valid_actions = {}
# List of valid state-actions
for (s, a) in self.sa_list:
if s not in self.Q:
self.Q[s] = {a : self.Q0}
else:
self.Q[s][a] = self.Q0
if s not in self.valid_actions:
self.valid_actions[s] = set([a])
else:
self.valid_actions[s].add(a)
def take_action(self, s, t):
""" Take epsilon-greedy or boltzmann action. """
# Compute annealing factor for epsilon or T
anneal_factor = np.exp(- t / self.anneal_timescale)
if self.dither_mode == 'epsilon-greedy':
# Get action corresponding to highest Q
a = self.get_max_a_Q(s, argmax=True)
if np.random.rand() < anneal_factor * self.dither_param:
# Return random pick from valid actions
return np.random.choice(list(self.valid_actions[s]))
else:
return a
elif self.dither_mode == 'boltzmann':
# Get list of valid actions from state s
valid_actions = list(self.valid_actions[s])
            # Get Q values corresponding to actions from state s
Q_ = np.array([self.Q[s][a] for a in valid_actions])
# Calculate Boltzmann probabilities and normalise
probs = np.exp(Q_ / (self.dither_param * anneal_factor))
probs = probs / probs.sum()
return np.random.choice(valid_actions, p=probs)
def update_Q(self, s, a, r, s_):
""" Update Q-estimates using Temporal Differences update. """
# Get maximum Q corresponding to next state s_
max_a_Q = self.get_max_a_Q(s_)
# Apply Q-Learning update rule
self.Q[s][a] += self.lr * (r + self.gamma * max_a_Q - self.Q[s][a])
def get_max_a_Q(self, s, argmax=False):
""" Returns the maximum of Q[s] across all valid actions. """
# Get list of valid actions
valid_actions = list(self.valid_actions[s])
        # Get Q values corresponding to actions from state s
Q_ = np.array([self.Q[s][a] for a in valid_actions])
if argmax:
# Break ties at random
a_idx = np.random.choice(np.argwhere(Q_ == np.amax(Q_))[:, 0])
return valid_actions[a_idx]
else:
return np.max(Q_)
def observe(self, transition):
t, s, a, r, s_ = transition
self.add_observations(s, a, r, s_)
self.last_transition = transition
def update_after_step(self, max_buffer_length, log):
# Log Q values
if log: self.Qlog.append(deepcopy(self.Q))
# Update Q values
t, s, a, r, s_ = self.last_transition
self.update_Q(s, a, r, s_)
self.last_transition = None
def get_name(self):
name = 'QLearningAgent_{}_param-{}_gamma-{}_lr-{}_Q0-{}-tscale-{}'
name = name.format(self.dither_mode,
self.dither_param,
self.gamma,
self.lr,
self.Q0,
self.anneal_timescale)
return name
# ============================================================================
# BayesianQAgent class
# ============================================================================
class BayesianQAgent(TabularAgent):
def __init__(self, params):
# Bayesian Q-Learning agent parameters
self.gamma = params['gamma']
self.mu0 = params['mu0']
self.lamda = params['lamda']
self.alpha = params['alpha']
self.beta = params['beta']
self.sa_list = params['sa_list']
self.num_mixture_samples = params['num_mixture_samples']
# List for storing Q posterior hyperparameters
self.Qpost_log = []
super(BayesianQAgent, self).__init__(params['gamma'])
# Dict for holding posterior phyperparameters
self.Qpost = {}
# Set normal-gamma prior parameters for each state-action
for s, a in self.sa_list:
if s not in self.Qpost: self.Qpost[s] = {}
self.Qpost[s][a] = (self.mu0, self.lamda, self.alpha, self.beta)
def take_action(self, s, t, reduce_max=True):
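        # Thompson-style action selection: draw one Q sample per action from
        # its posterior predictive and act greedily on the samples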
# Sample q values for each action from current state
qs, acts = self.sample_q(s)
if reduce_max:
# Return action corresponding to maximum q
return acts[np.argmax(qs)]
else:
return qs, acts
def sample_q(self, s):
# Arrays for holding q samples and corresponding actions
qs, acts = [], []
for a, hyp in self.Qpost[s].items():
# Sample from student-t distribution
st = np.random.standard_t(2 * hyp[2])
# q sample from t: m0 + t * (beta / (lamda * alpha))**0.5
qs.append(hyp[0] + st * (hyp[3] / (hyp[1] * hyp[2]))**0.5)
acts.append(a)
return np.array(qs), np.array(acts)
def kl_matched_hyps(self, s, a, r, s_):
num_samples = self.num_mixture_samples
# Find the action from s_ with the largest mean
a_ = self.max_mu0_action(s_)
# Parameters for next state-action NG and posterior predictive
mu0_, lamda_, alpha_, beta_ = self.Qpost[s_][a_]
coeff = (beta_ * (lamda_ + 1) / (alpha_ * lamda_))**0.5
# Sample from student-t, rescale and add mean
st = np.random.standard_t(2 * alpha_, size=(num_samples,))
z_samp = mu0_ + st * coeff
# Dicount and add reward
z_samp = r + self.gamma * z_samp
# z_sa posterior hyperparameters
mu0_sa, lamda_sa, alpha_sa, beta_sa = self.Qpost[s][a]
# z_sa posterior hyperparameters updated for each sample
mu0_ = (lamda_sa * mu0_sa + z_samp) / (lamda_sa + 1)
lamda_ = np.array([lamda_sa + 1] * mu0_.shape[0])
alpha_ = np.array([alpha_sa + 0.5] * mu0_.shape[0])
beta_ = beta_sa + lamda_sa * (z_samp - mu0_sa)**2 / (2 * lamda_sa + 2)
# Sample mu and tau for each set of updated hyperparameters
mus, taus = normal_gamma(mu0_, lamda_, alpha_, beta_)
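        # moment-match the mixture of updated normal-gamma posteriors with a
        # single normal-gamma by matching E[tau], E[mu*tau], E[mu^2*tau] and
        # E[log tau], i.e. the KL-matched approximation of Bayesian Q-learning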
# MC estimates of moments
E_tau = np.mean(taus)
E_mu_tau = np.mean(mus * taus)
E_mu2_tau = np.mean(mus**2 * taus)
E_log_tau = np.mean(np.log(taus))
# f^-1(x) where f(x) = log(x) - digamma(x)
f_inv_term = bql_f_inv(np.log(E_tau) - E_log_tau)
# Calculate hyperparameters of KL-matched normal gamma
mu0 = E_mu_tau / E_tau
lamda = 1 / (1e-12 + E_mu2_tau - E_tau * mu0**2)
alpha = max(1 + 1e-6, f_inv_term)
beta = alpha / E_tau
return mu0, lamda, alpha, beta
def max_mu0_action(self, s):
# Get actions and corresponding hyperparameters of R_sa distribution
a_mu0 = [(a, hyp[0]) for (a, hyp) in self.Qpost[s].items()]
a, mu0 = [np.array(arr) for arr in zip(*a_mu0)]
return a[np.argmax(mu0)]
def observe(self, transition):
t, s, a, r, s_ = transition
self.add_observations(s, a, r, s_)
self.last_transition = transition
def update_after_step(self, max_buffer_length, log):
# Log Q posterior hyperparameters
if log: self.Qpost_log.append(deepcopy(self.Qpost))
# Update hyperparameters
t, s, a, r, s_ = self.last_transition
hyps = self.kl_matched_hyps(s, a, r, s_)
self.Qpost[s][a] = hyps
self.last_transition = None
def get_name(self):
name = 'BayesianQAgent_gamma-{}_mu0-{}_lamda-{}_alpha-{}_beta-{}'
name = name.format(self.gamma,
self.mu0,
self.lamda,
self.alpha,
self.beta)
return name
# ============================================================================
# PSRLAgent agent definition
# ============================================================================
class PSRLAgent(TabularAgent):
def __init__(self, params):
# PSRL agent parameters
self.gamma = params['gamma']
self.kappa = params['kappa']
self.mu0 = params['mu0']
self.lamda = params['lamda']
self.alpha = params['alpha']
self.beta = params['beta']
self.sa_list = params['sa_list']
self.max_iter = params['max_iter']
self.Ppost = {}
self.Rpost = {}
self.buffer = []
self.num_s = len(set([s for (s, a) in self.sa_list]))
self.num_a = len(set([a for (s, a) in self.sa_list]))
# Lists for storing P and R posteriors
self.Ppost_log = []
self.Rpost_log = []
super(PSRLAgent, self).__init__(params['gamma'])
# Dynamics posterior
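        # symmetric Dirichlet prior with concentration kappa over next states
        # for every (state, action) pair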
self.Ppost = self.kappa * np.ones((self.num_s, self.num_a, self.num_s))
# Rewards posterior parameters for non-allowed actions
Rparam = [-1e12, 1e9, 1e12, 1e9]
Rparam = [[[Rparam] * self.num_s] * self.num_a] * self.num_s
self.Rpost = np.array(Rparam)
# Rewards posterior parameters for allowed actions
Rparam = [self.mu0, self.lamda, self.alpha, self.beta]
Rparam = np.array([Rparam] * self.num_s)
for (s, a) in self.sa_list:
self.Rpost[s, a, ...] = Rparam
self.sample_posterior_and_update_continuing_policy()
def sample_posterior(self):
# Initialise posterior arrays (dynamics 0, reward large negative)
P = np.zeros((self.num_s, self.num_a, self.num_s))
R = np.zeros((self.num_s, self.num_a, self.num_s))
for s in range(self.num_s):
for a in range(self.num_a):
P[s, a, :] = np.random.dirichlet(self.Ppost[s, a])
for s in range(self.num_s):
for a in range(self.num_a):
for s_ in range(self.num_s):
mu0, lamda, alpha, beta = self.Rpost[s, a, s_]
R[s, a, s_] = normal_gamma(mu0, lamda, alpha, beta)[0]
return P, R
def update_posterior(self):
        # Transition counts, reward sums and squared-reward sums
        p_counts = np.zeros((self.num_s, self.num_a, self.num_s))
        r_sums = np.zeros((self.num_s, self.num_a, self.num_s))
        r_sq_sums = np.zeros((self.num_s, self.num_a, self.num_s))
        r_counts = np.zeros((self.num_s, self.num_a, self.num_s))
        for (s, a, r, s_) in self.buffer:
            p_counts[s, a, s_] += 1
            r_sums[s, a, s_] += r
            r_sq_sums[s, a, s_] += r**2
            r_counts[s, a, s_] += 1
# Update dynamics posterior
for s in range(self.num_s):
for a in range(self.num_a):
# Dirichlet posterior params are prior params plus counts
self.Ppost[s, a] = self.Ppost[s, a] + p_counts[s, a]
# Update rewards posterior
for s in range(self.num_s):
for a in range(self.num_a):
for s_ in range(self.num_s):
mu0, lamda, alpha, beta = self.Rpost[s, a, s_]
                    # Calculate first and second sample moments of the rewards
                    M1 = r_sums[s, a, s_] / max(1, r_counts[s, a, s_])
                    M2 = r_sq_sums[s, a, s_] / max(1, r_counts[s, a, s_])
n = r_counts[s, a, s_]
# Update parameters
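                    # standard normal-gamma conjugate update given n rewards
                    # with sample mean M1 and sample variance (M2 - M1**2)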
mu0_ = (lamda * mu0 + n * M1) / (lamda + n)
lamda_ = lamda + n
alpha_ = alpha + 0.5 * n
beta_ = beta + 0.5 * n * (M2 - M1**2)
beta_ = beta_ + n * lamda * (M1 - mu0)**2 / (2 * (lamda + n))
self.Rpost[s, a, s_] = np.array([mu0_, lamda_, alpha_, beta_])
# Reset episode buffer
self.buffer = []
def take_action(self, s, t):
return self.pi[s]
def observe(self, transition):
t, s, a, r, s_ = transition
self.add_observations(s, a, r, s_)
self.buffer.append([s, a, r, s_])
def update_after_step(self, max_buffer_length, log):
# Log posterior values
if log:
self.Ppost_log.append(deepcopy(self.Ppost))
self.Rpost_log.append(deepcopy(self.Rpost))
if len(self.buffer) >= max_buffer_length:
self.update_posterior()
self.sample_posterior_and_update_continuing_policy()
def sample_posterior_and_update_continuing_policy(self):
# Sample dynamics and rewards posterior
P, R = self.sample_posterior()
# Solve Bellman equation by policy iteration
pi, Q = solve_tabular_continuing_PI(P, R, self.gamma, self.max_iter)
self.pi = pi
def get_name(self):
return 'PSRLAgent_gamma-{}'.format(self.gamma)
# ============================================================================
# UbeNoUnrollAgent class
# ============================================================================
class UbeNoUnrollAgent(TabularAgent):
def __init__(self, params):
self.Rmax = params['Rmax']
self.kappa = params['kappa']
self.mu0 = params['mu0']
self.lamda = params['lamda']
self.alpha = params['alpha']
self.beta = params['beta']
self.zeta = params['zeta']
self.sa_list = params['sa_list']
self.max_iter = params['max_iter']
self.num_dyn_samples = params['num_dyn_samples']
self.num_s = len(set([s for (s, a) in self.sa_list]))
self.num_a = len(set([a for (s, a) in self.sa_list]))
super(UbeNoUnrollAgent, self).__init__(params['gamma'])
# Set episode buffer
self.buffer = []
# Dynamics posterior
self.Ppost = self.kappa * np.ones((self.num_s, self.num_a, self.num_s))
# Rewards posterior parameters for non-allowed actions
Rparam_ = [[[[-1e12, 1e9, 1e12, 1e9]] * self.num_s] * self.num_a] * self.num_s
self.Rpost = np.array(Rparam_)
Rparam = np.array([[self.mu0, self.lamda, self.alpha, self.beta]] * self.num_s)
for (s, a) in self.sa_list:
self.Rpost[s, a, ...] = Rparam
self.set_Q_posterior()
self.pi_log, self.Qmu_log, self.Qvar_log = [], [], []
def update_posterior(self):
        # Transition counts, reward sums and squared-reward sums
        p_counts = np.zeros((self.num_s, self.num_a, self.num_s))
        r_sums = np.zeros((self.num_s, self.num_a, self.num_s))
        r_sq_sums = np.zeros((self.num_s, self.num_a, self.num_s))
        r_counts = np.zeros((self.num_s, self.num_a, self.num_s))
        for (s, a, r, s_) in self.buffer:
            p_counts[s, a, s_] += 1
            r_sums[s, a, s_] += r
            r_sq_sums[s, a, s_] += r**2
            r_counts[s, a, s_] += 1
# Update dynamics posterior
for s in range(self.num_s):
for a in range(self.num_a):
# Dirichlet posterior params are prior params plus counts
self.Ppost[s, a] = self.Ppost[s, a] + p_counts[s, a]
# Update rewards posterior
for s in range(self.num_s):
for a in range(self.num_a):
for s_ in range(self.num_s):
mu0, lamda, alpha, beta = self.Rpost[s, a, s_]
                    # Calculate first and second sample moments of the rewards
                    M1 = r_sums[s, a, s_] / max(1, r_counts[s, a, s_])
                    M2 = r_sq_sums[s, a, s_] / max(1, r_counts[s, a, s_])
n = r_counts[s, a, s_]
# Update parameters
mu0_ = (lamda * mu0 + n * M1) / (lamda + n)
lamda_ = lamda + n
alpha_ = alpha + 0.5 * n
beta_ = beta + 0.5 * n * (M2 - M1**2)
beta_ = beta_ + n * lamda * (M1 - mu0)**2 / (2 * (lamda + n))
self.Rpost[s, a, s_] = np.array([mu0_, lamda_, alpha_, beta_])
# Reset episode buffer
self.buffer = []
def set_Q_posterior(self):
'''
Computes the approximation (diagonal gaussian) of the Q posterior
under policy pi.
'''
# Get expectations of P and R under posterior
P, R = self.get_expected_P_and_R()
# Compute the greedy policy and corresponding Q values
pi, Qmu = solve_tabular_continuing_PI(P, R, self.gamma, self.max_iter)
# Compute the uncertainty (variance) of Q
Qvar = self.solve_bellman(self.local_rew_var,
self.gamma**2,
pi)
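        # Uncertainty Bellman Equation: propagate the local reward variance
        # through a gamma**2-discounted Bellman recursion to upper-bound the
        # variance of the Q posterior (O'Donoghue et al., 2018)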
# Set policy, Q and Q epistemic variance upper bound
self.pi = pi
self.Qmu = Qmu
self.Qvar = Qvar
def get_expected_P_and_R(self):
return self.Ppost / self.Ppost.sum(axis=-1)[..., None], self.Rpost[..., 0]
def take_action(self, s, t, reduce_max=True):
# Posterior mean and variance
mu = self.Qmu[s, :]
var = self.Qvar[s, :]
# Sample Q from diagonal gaussian
Q_sample = np.random.normal(loc=mu, scale=(self.zeta * var**0.5))
# Return argmax to choose action
if reduce_max:
            return np.argmax(Q_sample)
import random
import numpy as np
from deap import base, creator, tools
from deap.tools.emo import selNSGA2
import h5py
import vectorization_tools
from mnist_member import MnistMember
from digit_mutator import DigitMutator
from predictor2 import Predictor
from timer import Timer
from utils import print_archive, print_archive_experiment
import archive_manager2
from individual import Individual
from config import NGEN, \
POPSIZE, INITIALPOP, \
RESEEDUPPERBOUND, GENERATE_ONE_ONLY, DATASET, \
STOP_CONDITION, STEPSIZE, DJ_DEBUG
# Load the dataset.
hf = h5py.File(DATASET, 'r')
x_test = hf.get('xn')
x_test = np.array(x_test)
import numpy as np
import numpy.testing as npt
from stumpy import scrump, stump, config
from stumpy.scrump import prescrump
import pytest
import naive
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
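# each entry is a (T_A, T_B) pair; results of the fast implementations are
# checked against brute-force references from the naive module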
window_size = [8, 16, 32]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
percentages = [(0.01, 0.1, 1.0)]
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescrump_self_join(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescrump(T_B, m, T_B, s=s, exclusion_zone=zone)
np.random.seed(seed)
comp_P, comp_I = prescrump(T_B, m, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescrump_A_B_join(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescrump(T_A, m, T_B, s=s)
np.random.seed(seed)
comp_P, comp_I = prescrump(T_A, m, T_B=T_B, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescrump_A_B_join_swap(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescrump(T_B, m, T_A, s=s)
np.random.seed(seed)
comp_P, comp_I = prescrump(T_B, m, T_B=T_A, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_prescrump_self_join_larger_window(T_A, T_B, m):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescrump(T_B, m, T_B, s=s, exclusion_zone=zone)
np.random.seed(seed)
comp_P, comp_I = prescrump(T_B, m, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
def test_scrump_int_input():
with pytest.raises(TypeError):
scrump(np.arange(10), 5, ignore_trivial=True, percentage=1.0, pre_scrump=False)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scrump_self_join(T_A, T_B, percentages):
m = 3
zone = int(np.ceil(m / 4))
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scrump(T_B, m, T_B, percentage, zone, False, None)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scrump(
T_B, m, ignore_trivial=True, percentage=percentage, pre_scrump=False
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scrump_A_B_join(T_A, T_B, percentages):
m = 3
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scrump(T_A, m, T_B, percentage, None, False, None)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scrump(
T_A, m, T_B, ignore_trivial=False, percentage=percentage, pre_scrump=False
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scrump_A_B_join_swap(T_A, T_B, percentages):
m = 3
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scrump(T_B, m, T_A, percentage, None, False, None)
ref_P = ref_mp[:, 0]
# ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scrump(
T_B, m, T_A, ignore_trivial=False, percentage=percentage, pre_scrump=False
)
approx.update()
comp_P = approx.P_
# comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
@pytest.mark.parametrize("percentages", percentages)
def test_scrump_self_join_larger_window(T_A, T_B, m, percentages):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scrump(T_B, m, T_B, percentage, zone, False, None)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scrump(
T_B, m, ignore_trivial=True, percentage=percentage, pre_scrump=False
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scrump_self_join_full(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_B, m, exclusion_zone=zone)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scrump(T_B, m, ignore_trivial=True, percentage=1.0, pre_scrump=False)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_I, comp_left_I)
import os
import sys
import yaml
import json
import time
import argparse
import numpy as np
import pickle
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
import src.utils
import src.dataset
import src.evaluation
if __name__ == "__main__":
# config file
parser = argparse.ArgumentParser(description="Test linear model.")
parser.add_argument('--config', type=str, default="config_linear_test.yaml")
args = parser.parse_args()
### END CONFIG ###
### PATHS & CONFIG
project_root = os.getcwd()
data_root = os.path.join(project_root, "datasets/maad")
exp_root = os.path.join(project_root, "experiments")
config_root = os.path.join(project_root, "config")
# config
config_path = os.path.join(config_root, args.config)
with open(config_path, "r") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
# data
data_path_test = os.path.join(data_root, config["dataset"]["set"])
# experiment path
method = config["model"]["type"]
run_name = method
date_time = src.utils.get_current_time()
run_name = date_time + "_" + run_name
exp_dir = os.path.join(exp_root, run_name)
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# create evaluation directory
eval_dir = "eval_" + src.utils.get_current_time()
eval_path = os.path.join(exp_dir, eval_dir)
if not os.path.isdir(eval_path):
os.makedirs(eval_path)
### DATA
dset_test = src.dataset.MAADDataset(data_path_test, obs_len=config["model"]["obs_len"],
adj_type="identity")
loader_test = DataLoader(dset_test,
batch_size=1,
shuffle=False,
num_workers=1)
### PREDICTION
print("\nPredicting...")
pred_start = time.time()
prediction_data = {}
step = 0
for cnt, batch in enumerate(loader_test):
step += 1
# get data
obs_traj, obs_traj_rel, frame_ids, seq_ids, labels, V_obs, A_obs = batch
# prepare data
obs_traj = obs_traj.numpy()[0] # its anyway batch size = 1
frame_ids = frame_ids.numpy()[0].tolist()
seq_ids = seq_ids.numpy()[0]
labels = labels.numpy()[0]
# init linear trajectory
linear_traj = np.zeros(obs_traj.shape)
N = obs_traj.shape[0]
# model each agent individually
for i in range(N):
# get agent trajectory
agent_traj = obs_traj[i]
# trajectory features
start_pos = agent_traj[:, 0]
end_pos = agent_traj[:, -1]
n_ts = agent_traj.shape[1]
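            # CVM extrapolates the first observed velocity; LTI linearly
            # interpolates between the first and last observed positions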
if method == "cvm":
# CVM
velocity = agent_traj[:, 1] - agent_traj[:, 0]
approx_agent_traj = np.zeros(agent_traj.shape) + velocity[:, np.newaxis]
approx_agent_traj[:, 0] = start_pos
approx_agent_traj = np.cumsum(approx_agent_traj, axis=1)
elif method == "lti":
# LTI
x_interp = np.linspace(start_pos[0], end_pos[0], n_ts)
y_interp = np.linspace(start_pos[1], end_pos[1], n_ts)
                approx_agent_traj = np.zeros(agent_traj.shape)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 09:16:42 2022
@author: mbonnema
"""
import os
from netCDF4 import Dataset
import matplotlib.pyplot as plt
#import geopandas as geo
import datetime
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import ee
ee.Initialize()
import shapely
import matplotlib.lines as mlines
import csv
from readCSV import readCSV
from FilterS1 import FilterS1
from FilterJRC import FilterJRC
from InterpS1 import InterpS1
from InterpJRC import InterpJRC
print('Preparing Data...')
dataDir = '../../Results/World_Ver3_CSV/'
print('\tReading data csv files...')
D,A,LE,WE,ND = readCSV(dataDir)
Ds1 = {}
As1 = {}
Dgsw = {}
Agsw = {}
#print(LE['1646'])
[Ds1, Dgsw] = map(lambda keys: {x: D[x] for x in keys}, [WE.keys(), ND.keys()])
[As1, Agsw] = map(lambda keys: {x: A[x] for x in keys}, [WE.keys(), ND.keys()])
print('\t\tComplete')
print('\tFiltering area data...')
Ds1,As1,WE,LE = FilterS1(Ds1,As1,WE,LE)
Dgsw,Agsw,ND = FilterJRC(Dgsw,Agsw,ND)
D = {}
A = {}
D.update(Ds1)
D.update(Dgsw)
A.update(As1)
A.update(Agsw)
print('\t\tComplete')
print('\tLoading Lake Database Fields...')
lakes = ee.FeatureCollection('users/matthewbonnema/HydroLAKES')
largeLakes = lakes.filter(ee.Filter.gte('Lake_area',1))
lakeID = largeLakes.aggregate_array('Hylak_id').getInfo()
lakeType = largeLakes.aggregate_array('Lake_type').getInfo()
lakeLat = largeLakes.aggregate_array('Pour_lat').getInfo()
lakeLon = largeLakes.aggregate_array('Pour_long').getInfo()
lakeArea = largeLakes.aggregate_array('Lake_area').getInfo()
print('\t\tComplete')
print('\tCompute Area Variations...')
Av = []
Avp = []
Am = []
A_database = []
Amin = []
Amax = []
lat = []
lon = []
Ltype = []
for key in D:
try:
a = A[key]
stda = np.std(a)
mina = np.nanmin(a)
maxa = np.nanmax(a)
vara = maxa - mina
meana = np.nanmean(a)
varap = vara/meana
ad = lakeArea[lakeID.index(int(key))]
index = lakeID.index(int(key))
if np.isnan(mina) or np.isnan(maxa) or np.isnan(meana) or np.isnan(vara):
continue
Av.append(vara)
Avp.append(varap)
Am.append(meana)
A_database.append(ad)
Amin.append(mina)
Amax.append(maxa)
lat.append(lakeLat[index])
lon.append(lakeLon[index])
lt = lakeType[index]
if lt == 3:
lt = 2
Ltype.append(lt)
except:
continue
A_database = np.array(A_database)[np.isfinite(np.array(Avp))]
Av = np.array(Av)[np.isfinite(np.array(Avp))]
Am = np.array(Am)[np.isfinite(np.array(Avp))]
import numpy as np
import tinyobjloader
def obj_loader(path):
# Create reader.
reader = tinyobjloader.ObjReader()
# Load .obj(and .mtl) using default configuration
ret = reader.ParseFromFile(path)
if ret == False:
print("Failed to load : ", path)
return None
# note here for wavefront obj, #v might not equal to #vt, same as #vn.
attrib = reader.GetAttrib()
v = np.array(attrib.vertices).reshape(-1, 3)
vn = np.array(attrib.normals).reshape(-1, 3)
vt = np.array(attrib.texcoords).reshape(-1, 2)
shapes = reader.GetShapes()
tri = shapes[0].mesh.numpy_indices().reshape(-1, 9)
f_v = tri[:, [0, 3, 6]]
f_vn = tri[:, [1, 4, 7]]
f_vt = tri[:, [2, 5, 8]]
faces = f_v #[m, 3]
face_normals = vn[f_vn].mean(axis=1) #[m, 3]
face_uvs = vt[f_vt].mean(axis=1) #[m, 2]
verts = v #[n, 3]
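    # scatter per-corner normals/uvs back onto shared vertices; when a vertex
    # belongs to several faces, the last face written wins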
vert_normals = np.zeros((verts.shape[0], 3), dtype=np.float32) #[n, 3]
vert_normals[f_v.reshape(-1)] = vn[f_vn.reshape(-1)]
vert_uvs = np.zeros((verts.shape[0], 2), dtype=np.float32) #[n, 2]
vert_uvs[f_v.reshape(-1)] = vt[f_vt.reshape(-1)]
return verts, faces, vert_normals, face_normals, vert_uvs, face_uvs
def load_obj_mesh_for_Hoppe(mesh_file):
vertex_data = []
face_data = []
if isinstance(mesh_file, str):
f = open(mesh_file, "r")
else:
f = mesh_file
for line in f:
if isinstance(line, bytes):
line = line.decode("utf-8")
if line.startswith('#'):
continue
values = line.split()
if not values:
continue
if values[0] == 'v':
v = list(map(float, values[1:4]))
vertex_data.append(v)
elif values[0] == 'f':
# quad mesh
if len(values) > 4:
f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
face_data.append(f)
f = list(map(lambda x: int(x.split('/')[0]), [values[3], values[4], values[1]]))
face_data.append(f)
# tri mesh
else:
f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
face_data.append(f)
vertices = np.array(vertex_data)
    faces = np.array(face_data)
import numpy as np
import numba
from numba import jit
from sklearn import metrics
import random
import traj_tools
numericThresh = 1E-150
logNumericThresh = np.log(numericThresh)
import torch
import numpy as np
import os
import pandas as pd
import re
import sys
import tqdm
from absl import flags
import chexpert_labeler
from api.models.base import DataParallelCPU
from api.models.nondiff import CheXpert
from api.metrics import Bleu, Rouge, CiderD as Cider, MentionSim
from api.utils import to_numpy
flags.DEFINE_string('do', None, '')
flags.DEFINE_enum('dataset', None, ['mimic-cxr', 'open-i'], 'Dataset to use')
flags.DEFINE_string('raw', None, '')
flags.DEFINE_string('cache', None, '')
flags.DEFINE_list('remove_tokens', [], '')
FLAGS = flags.FLAGS
def compile():
re_objs = [re.compile(token) for token in FLAGS.remove_tokens]
bleu = Bleu(4)
rouge = Rouge()
cider = Cider(df_cache=torch.load(os.path.join(cache_dir, 'cider-cache.pkl')))
_df = pd.read_csv(FLAGS.raw, sep='\t').fillna('')
_df = _df.rename(columns={'pred_text': 'text'})
df_sentence = pd.read_csv(df_sentence_path, sep='\t')
df_report = pd.read_csv(df_report_path, sep='\t')
rad_ids = set(_df.rad_id) & set(df_sentence.rad_id)
df = pd.merge(
df_sentence.loc[df_sentence.rad_id.isin(rad_ids)].groupby('rad_id').sentence.apply(' '.join).rename('text').reset_index(),
df_report.loc[df_report.rad_id.isin(rad_ids)].drop(columns='text', errors='ignore'),
on='rad_id',
)
_df = _df[_df.rad_id.isin(rad_ids)]
df = _df[['rad_id']].merge(df, on='rad_id', how='left')
df_metric = pd.DataFrame(
{'rad_id': _df.rad_id},
index=range(len(_df)),
)
for index in tqdm.trange(len(_df)):
_text = _df.text.iloc[index]
for re_obj in re_objs:
_text = re_obj.sub('', _text)
text = df.text.iloc[index]
for scorer in [bleu, rouge, cider]:
report_score = scorer([_text], [text])
if report_score.dim() == 2:
for (num, _report_score) in enumerate(report_score):
df_metric.loc[index, f'{scorer.method()}-{num + 1}'] = _report_score.mean().item()
else:
df_metric.loc[index, f'{scorer.method()}'] = report_score.mean().item()
print('Evaluating CheXpert label...')
label = df[chexpert_labeler.CATEGORIES].values
if all(map(_df.columns.__contains__, chexpert_labeler.CATEGORIES)):
chexpert = None
_label = _df[chexpert_labeler.CATEGORIES].values
else:
chexpert = DataParallelCPU(CheXpert, num_jobs=None, maxtasksperchild=256, verbose=True)
_label = to_numpy(chexpert(_df.text.values))
index = _label * 4 + label
for (num, category) in enumerate(chexpert_labeler.CATEGORIES):
df_metric[category] = index[:, num]
df_metric.to_csv(FLAGS.cache, sep='\t', index=False)
if chexpert is not None:
chexpert.close()
def calc():
df_metric = pd.read_csv(FLAGS.cache, sep='\t')
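    # CheXpert mentions take 4 encoded values, so index = pred * 4 + true
    # flattens the 4x4 confusion table; the lookup vectors below assign
    # tp/fn/fp/tn credit per combination (0.5 for uncertain mentions)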
tp = np.array([
np.nan, np.nan, np.nan, 0.0,
np.nan, np.nan, np.nan, 0.5,
np.nan, np.nan, np.nan, 0.0,
np.nan, np.nan, np.nan, 1.0,
])
fn = 1 - tp
fp = np.array([
0.0, np.nan, 0.0, np.nan,
0.5, np.nan, 0.5, np.nan,
0.0, np.nan, 0.0, np.nan,
1.0, np.nan, 1.0, np.nan,
])
tn = 1 - fp
index = df_metric[chexpert_labeler.CATEGORIES]
tp = np.nansum(tp[index], axis=0)
fn = np.nansum(fn[index], axis=0)
fp = np.nansum(fp[index], axis=0)
    tn = np.nansum(tn[index], axis=0)
# ND2 extractor, Kymograph generator
# author: <NAME>
# product manager: <NAME>, <NAME>
# Special thanks for technical support: <NAME>
#
#
# Library dependence:
# use nd2reader 2.1.3, don't use the new version!!!!!
# library install instructions:
# In terminal, type:
# nd2reader: In terminal, type: "pip install "nd2reader==2.1.3"" or "pip3 install "nd2reader==2.1.3""
# PIL: In terminal, type: "pip install Pillow" or "pip3 install Pillow"
# pims: In terminal, type: "pip install pims_nd2" or "pip3 install pims_nd2"
#
# # Todo: create a GUI
import matplotlib.pyplot as pl
import glob # pathname pattern
from PIL import Image
# from ND2 extractor
import nd2reader
import os
import PIL
import numpy as np
from pims import ND2_Reader
import xml.etree.cElementTree as ET
import re
import pathos.multiprocessing
import multiprocessing
from datetime import datetime
import h5py
from tifffile import imsave
# todo: fix extractor xml file problem
# todo: new class for segmentation & lineage tracking
# step 1, extract ND2 as usual
class ND2_extractor():
def __init__(self, nd2_file, file_directory, xml_file=None, xml_dir=None, output_path=None):
self.input_path = file_directory
self.nd2_file = nd2_file
self.nd2_file_name = nd2_file[:-4]
self.xml_file = xml_file
self.xml_dir = xml_dir
self.output_path = output_path
self.main_dir = file_directory + "/" + self.nd2_file_name
self.nd2_f = nd2_file
self.file_dir = file_directory
self.pos_dict = None
self.pos_offset = None
self.lane_dict = None
def lane_info(self):
# dict for lane info
nd2_new = ND2_Reader(self.nd2_file)
nd2_new.iter_axes = 'm'
lane_dict = {}
lane_dict[0] = 1
pos_offset = {}
cur_lane = 1
pos_min = 0
pos_offset[cur_lane] = pos_min - 1
y_prev = nd2_new[0].metadata['y_um']
pos_num = len(nd2_new)
for i in range(1, pos_num):
f = nd2_new[i]
y_now = f.metadata['y_um']
if abs(y_now - y_prev) > 200: # a new lane
cur_lane += 1
pos_min = i - 1
pos_offset[cur_lane] = pos_min
lane_dict[i] = cur_lane
y_prev = y_now
nd2_new.close()
self.lane_dict = lane_dict
self.pos_offset = pos_offset
def pos_info(self):
cur_dir = os.getcwd()
os.chdir(self.xml_dir)
tree = ET.ElementTree(file=self.xml_file)
root = tree.getroot()[0]
pos_dict = {}
lane_dict = {}
pos_offset = {}
lane_count = 0
lane_name_prev = None
dummy_count = 0
for i in root:
if i.tag.startswith('Point'):
ind = int(i.tag[5:])
pos_name = i[1].attrib['value']
if len(pos_name) < 1:
pos_name = "dummy_" + str(dummy_count)
dummy_count += 1
lane_name_cur = "dummy"
else:
lane_name_cur = re.match(r'\w', pos_name).group()
if lane_name_cur != lane_name_prev:
lane_name_prev = lane_name_cur
lane_count += 1
pos_offset[lane_count] = ind - 1
lane_dict[ind] = lane_count
pos_dict[ind] = pos_name
os.chdir(cur_dir)
self.pos_dict = pos_dict
self.lane_dict = lane_dict
self.pos_offset = pos_offset
def tiff_extractor(self, pos):
nd2 = nd2reader.Nd2(self.nd2_f)
if self.pos_dict:
            new_dir = self.main_dir + "/Lane_" + str(self.lane_dict[pos]).zfill(2) + "/" + self.pos_dict[pos] + "/"
else:
lane_ind = self.lane_dict[pos]
pos_off = self.pos_offset[lane_ind]
new_dir = self.main_dir + "/Lane_" + str(lane_ind).zfill(2) + "/pos_" + str(pos - pos_off).zfill(3) + "/"
# create a folder for each position
if not os.path.exists(new_dir):
os.makedirs(new_dir)
os.chdir(new_dir)
if self.pos_dict:
meta_name = self.nd2_file_name + "_" + self.pos_dict[pos] + "_t"
else:
meta_name = self.nd2_file_name + "_pos_" + str(pos - pos_off).zfill(3) + "_t"
for image in nd2.select(fields_of_view=pos):
channel = image._channel
channel = str(channel.encode('ascii', 'ignore'))
time_point = image.frame_number
tiff_name = meta_name + str(time_point).zfill(4) + "_c_" + channel + ".tiff"
# save file in 16-bit
# thanks to http://shortrecipes.blogspot.com/2009/01/python-python-imaging-library-16-bit.html
image = image.base.astype(np.uint16)
out = PIL.Image.frombytes("I;16", (image.shape[1], image.shape[0]), image.tobytes())
out.save(tiff_name)
os.chdir(self.file_dir)
def run_extraction(self):
start_t = datetime.now()
os.chdir(self.input_path)
# get position name if xml is available
if self.xml_file:
if not self.xml_dir:
self.xml_dir = self.input_path
self.pos_info()
# otherwise get lane info from y_um
else:
self.lane_info()
os.chdir(self.input_path)
# switch to another ND2reader for faster iterations
nd2 = nd2reader.Nd2(self.nd2_file)
main_dir = self.input_path + "/" + self.nd2_file_name
if not os.path.exists(main_dir):
os.makedirs(main_dir)
# parallelize extraction
poses = nd2.fields_of_view
cores = pathos.multiprocessing.cpu_count()
pool = pathos.multiprocessing.Pool(cores)
pool.map(self.tiff_extractor, poses)
time_elapsed = datetime.now() - start_t
print('Time elapsed for extraction (hh:mm:ss.ms) {}'.format(time_elapsed))
#############
# todo: deal with trenches at bottom & one fov with 2 trenches
# todo: incorporate Sadik's Phase Contrast channel
# todo: rotation correction for poor aligned chips
# todo: trench identification with multiple channels
class trench_kymograph():
def __init__(self, nd2_file, main_directory, lane, pos, channel, seg_channel, trench_length, trench_width, spatial,
drift_correct=0, find_correct=0, frame_start=None, frame_limit=None, output_dir=None,
box_info=None, trench_detect_start=None, trench_detect_end=None):
self.prefix = nd2_file[:-4]
self.main_path = main_directory
self.lane = lane
self.channel = channel
self.seg_channel = seg_channel
self.pos = pos
self.trench_length = trench_length
self.trench_width = trench_width
self.frame_start = frame_start
self.frame_limit = frame_limit
self.seg_channel = seg_channel
self.drift_correct = drift_correct
self.find_correct = find_correct
self.drift_x = None
self.drift_y = None
self.drift_x_txt = None
self.drift_y_txt = None
self.spatial = spatial # 0 for top, 1 for bottom, 2 for both
self.tops = []
self.bottoms = []
self.meta = None
self.height = None
self.width = None
self.total_t = None
self.out_file = None
self.box_info = box_info # file names
self.file_list = None
self.frame_end = None
self.trench_detect_start = trench_detect_start
self.trench_detect_end = trench_detect_end
self.file_list_trench_detect = None
# TODO: change the path pattern if you didn't extract the ND2 with my extractor
self.file_path = self.main_path + "/" + self.prefix + "/Lane_" + str(self.lane).zfill(2) + "/pos_" + str(
self.pos).zfill(3)
if output_dir:
self.output_dir = output_dir
else:
self.output_dir = self.file_path
###
# TODO: change the path pattern if you didn't extract the ND2 with my extractor
def get_file_list(self):
os.chdir(self.file_path)
self.file_list = glob.glob('*' + self.channel + '*.tif*')
# print(self.file_path, self.seg_channel, '*' + self.channel + '*.tif*', self.file_list)
# exit()
def get_time(name):
sub_name = name.split('_t0')[1]
# print sub_name
num = sub_name.split('_c')[0]
return int(num)
self.file_list.sort(key=get_time)
# print(self.file_list)
# exit()
if self.frame_start is None:
self.frame_start = 0
if self.frame_limit is None:
self.frame_end = len(self.file_list)
else:
self.frame_end = self.frame_start + self.frame_limit
self.file_list = self.file_list[self.frame_start:self.frame_end]
[self.height, self.width] = pl.imread(self.file_list[0]).shape
return
def get_file_list_for_trench_detection(self):
os.chdir(self.file_path)
self.file_list_trench_detect = glob.glob('*' + self.channel + '*.tif*')
def get_time(name):
sub_name = name.split('_t0')[1]
# print sub_name
num = sub_name.split('_c')[0]
return int(num)
self.file_list_trench_detect.sort(key=get_time)
if self.trench_detect_start is None:
self.trench_detect_start = self.frame_start
if self.trench_detect_end is None:
self.trench_detect_end = self.trench_detect_start + 50 # using 50 consecutive frames for trench detection otherwise specified
self.file_list_trench_detect = self.file_list_trench_detect[self.trench_detect_start:self.trench_detect_end]
[self.height, self.width] = pl.imread(self.file_list_trench_detect[0]).shape
return
def find_drift(self):
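        # estimate per-frame stage drift: horizontal drift by aligning trench
        # peaks between consecutive frames (cumulative, positive = rightward);
        # vertical drift from trench tops is currently disabled (always 0)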
lane_path = self.main_path + "/" + self.prefix + "/Lane_" + str(self.lane).zfill(2)
tops = []
peaks = []
file_num = len(self.file_list)
drift_y = open(lane_path + '/drift_y.txt', 'w')
drift_x = open(lane_path + '/drift_x.txt', 'w')
y_shift = [0]
# Todo: parallelization?
for i in range(len(self.file_list)):
# print(self.find_top(i))
tops.append(self.find_top(i))
for i in range(len(tops)-1):
diff = 0
# diff = tops[i+1] - tops[i]
# if diff > 10:
# diff = 0
y_shift.append(diff)
for i in range(len(self.file_list)):
peaks.append(self.find_peaks(i, tops))
# positive: downwards drift
drift_y.write(' '.join(map(str, y_shift)))
# print(y_shift)
x_shift = [0]
for i in range(file_num - 1):
list_a = peaks[i]
list_b = peaks[i + 1]
move = self.pairwise_list_align(list_a, list_b, self.trench_width * 0.75)
x_shift.append(move)
# positive: drift to the right
x_shift = np.cumsum(np.array(x_shift)).astype(int)
drift_x.write(' '.join(map(str, x_shift.tolist())))
self.drift_x = x_shift
self.drift_y = y_shift
self.drift_x_txt = 'drift_x.txt'
self.drift_y_txt = 'drift_y.txt'
return
def read_drift(self):
self.drift_x_txt = 'drift_x.txt'
self.drift_y_txt = 'drift_y.txt'
lane_path = self.main_path + "/" + self.prefix + "/Lane_" + str(self.lane).zfill(2)
self.drift_x_txt = lane_path + "/" + self.drift_x_txt
self.drift_y_txt = lane_path + "/" + self.drift_y_txt
# read files into np array
self.drift_x = np.loadtxt(self.drift_x_txt, dtype=int, delimiter=' ')
self.drift_y = np.loadtxt(self.drift_y_txt, dtype=int, delimiter=' ')
return
def find_top(self, i):
self.get_file_list_for_trench_detection()
im_i = pl.imread(self.file_list_trench_detect[i])
x_per = np.percentile(im_i, 95, axis=1)
intensity_scan = x_per
intensity_scan = intensity_scan / float(sum(intensity_scan))
# normalize intensity
im_min = intensity_scan.min()
im_max = intensity_scan.max()
scaling_factor = (im_max - im_min)
intensity_scan = (intensity_scan - im_min)
intensity_scan = (intensity_scan / scaling_factor)
if self.spatial == 1:
# actually bottoms, but mie..
top = np.where(intensity_scan > 0.2)[0][-1]
else:
top = np.where(intensity_scan > 0.2)[0][0]
return top
def find_peaks(self, i, tops):
self.get_file_list_for_trench_detection()
# self.file_list_trench_detect
im_i = pl.imread(self.file_list_trench_detect[i])
# crop the trench region
im_trenches = im_i[tops[0]:tops[0] + self.trench_length]
im_trenches_perc = np.percentile(im_trenches, 90, axis=0)
# normalize intensity
im_min = im_trenches_perc.min()
im_max = im_trenches_perc.max()
scaling_factor = (im_max - im_min)
im_trenches_perc = (im_trenches_perc - im_min)
im_trenches_perc = (im_trenches_perc / scaling_factor)
        peak = self.detect_peaks(im_trenches_perc, mph=0.15, mpd=self.trench_width)
new_peak = self.peak_correct(peak, im_trenches_perc)
return new_peak
def peak_correct(self, old_peak, im_intensity):
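        # refine each detected peak to the midpoint of its region above half
        # maximum, centring the trench even when its profile is skewed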
        half_trench_width = self.trench_width // 2  # integer half-width for slicing
new_peaks = [old_peak[0]]
for p in old_peak[1:-1]:
half_p_height = im_intensity[p]/2 # int
full_peak = im_intensity[p - half_trench_width:p + half_trench_width+1]
p_tops = np.where(full_peak>half_p_height)
p_left = p - half_trench_width + p_tops[0][0]
p_right = p - half_trench_width + p_tops[0][-1]
            p_corrected = (p_left + p_right) // 2
new_peaks.append(p_corrected)
new_peaks.append(old_peak[-1])
return new_peaks
def get_trenches(self):
os.chdir(self.file_path)
# use the first 50 frames to identify trench relation
self.get_file_list_for_trench_detection()
frame_num = len(self.file_list_trench_detect)
# using the 85 percentile of the intensity of the first 50 frames as the meta-representation
im_stack = np.zeros((min(50, frame_num), self.height, self.width))
for i in range(min(50, frame_num)):
im_i = pl.imread(self.file_list_trench_detect[i])
if np.max(im_i) > 255:
im_i = self.to_8_bit(im_i)
if self.drift_correct == 1:
# correct for drift
move_x = self.drift_x[i]
temp = np.zeros((self.height, self.width))
if move_x > 0:
temp[:, :self.width-move_x] = im_i[:,move_x:]
else:
temp[:, (-move_x):] = im_i[:, :self.width+move_x]
im_i = temp
im_stack[i] = im_i
perc = np.percentile(im_stack, 85, axis=0).astype(np.uint8)
out_file = "perc_85_frame_50.tiff"
# convert to 8-bit, using the imageJ way
out = PIL.Image.frombytes("L", (self.width, self.height), perc.tobytes())
out.save(out_file)
# identify tops & bottoms
if self.spatial != 2:
intensity_scan = np.percentile(perc, 90, axis=1)
# intensity_scan = np.max(perc,axis=1)
intensity_scan = intensity_scan / float(sum(intensity_scan))
# normalize intensity
im_min = intensity_scan.min()
im_max = intensity_scan.max()
scaling_factor = (im_max - im_min)
intensity_scan = (intensity_scan - im_min)
intensity_scan = (intensity_scan / scaling_factor)
else:
perc_top = perc[:int(self.height/2),:]
perc_bot = perc[int(self.height/2):,:]
intensity_scan_top = np.percentile(perc_top, 90, axis=1)
# intensity_scan_top = np.max(perc_top,axis=1)
intensity_scan_top = intensity_scan_top / float(sum(intensity_scan_top))
# normalize intensity
im_min_top = intensity_scan_top.min()
im_max_top = intensity_scan_top.max()
scaling_factor_top = (im_max_top - im_min_top)
intensity_scan_top = (intensity_scan_top - im_min_top)
intensity_scan_top = (intensity_scan_top / scaling_factor_top)
            intensity_scan_bot = np.percentile(perc_bot, 90, axis=1)
# intensity_scan_bot = np.max(perc_bot, axis=1)
intensity_scan_bot = intensity_scan_bot / float(sum(intensity_scan_bot))
# normalize intensity
im_min_bot = intensity_scan_bot.min()
im_max_bot = intensity_scan_bot.max()
scaling_factor_bot = (im_max_bot - im_min_bot)
intensity_scan_bot = (intensity_scan_bot - im_min_bot)
intensity_scan_bot = (intensity_scan_bot / scaling_factor_bot)
pl.plot(intensity_scan_bot)
pl.show()
pl.plot(intensity_scan_top)
pl.show()
if self.spatial == 0: # top
top = max(0, np.where(intensity_scan > 0.2)[0][0] - 30)
bottom = top + self.trench_length + 60
self.tops.append(top)
self.bottoms.append(bottom)
elif self.spatial == 1: # bottom
bottom = min(self.height,np.where(intensity_scan > 0.2)[0][-1] + 30)
top = bottom - self.trench_length - 60
self.tops.append(top)
self.bottoms.append(bottom)
else: # both
# top one
top = max(0, np.where(intensity_scan_top > 0.2)[0][0] - 30)
bottom = top + self.trench_length + 60
self.tops.append(top)
self.bottoms.append(bottom)
# bottom one
bottom = min(self.height, | np.where(intensity_scan_bot > 0.2) | numpy.where |
from __future__ import print_function, division
import numpy as np
from allennlp.commands.elmo import ElmoEmbedder
import time
import torch
class ElmoEncoder(object):
def __init__(self):
self.elmo = ElmoEmbedder()
def encode_batch(self, sents):
start_time = time.time()
vec_seq = self.elmo.embed_sentences(sents)
elapsed_time = time.time() - start_time
print("embed_sentences {}".format(elapsed_time))
vecs = []
start_time = time.time()
for vec in vec_seq:
vecs.append(self.collapse_vec(vec))
# vecs = torch.stack(vecs)
vecs = np.stack(vecs)
        elapsed_time = time.time() - start_time
print("collapse {}".format(elapsed_time))
print("vecs ", vecs.shape)
return vecs
def collapse_vec(self, vec_seq, time_combine_method="max", layer_combine_method="add"):
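        # vec_seq is (n_layers, seq_len, dim) -- (3, L, 1024) for standard ELMo;
        # time_combine_method collapses the token axis (axis=1), then
        # layer_combine_method collapses the layer axis (axis=0)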
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = np.concatenate(vec, axis=0)
elif layer_combine_method == "last":
vec = vec[-1]
else:
raise NotImplementedError
return vec
def encode(self, sents, time_combine_method="max", layer_combine_method="add"):
""" Load ELMo and encode sents """
vecs = {}
for sent in sents:
vec_seq = self.elmo.embed_sentence(sent)
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = | np.concatenate(vec, axis=0) | numpy.concatenate |
"""
Visualize the transformations
Matplotlib:
quiver plot
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# Function to plot a single transformation
def plot_transformation(transformation):
"""
Plot Transformation matrix
...
Parameters
---
transformation: 4x4 transformation matrix
Returns
---
None
Notes
---
RGB -> XYZ
"""
fig = plt.figure()
ax = fig.gca(projection='3d')
# x, y, z of 6 arrows in a quiver plot
x = np.array([0, 0, 0, transformation[0, 3], transformation[0, 3], transformation[0, 3]])
y = np.array([0, 0, 0, transformation[1, 3], transformation[1, 3], transformation[1, 3]])
z = np.array([0, 0, 0, transformation[2, 3], transformation[2, 3], transformation[2, 3]])
# u, v, w of 6 arrows in a quiver plot
u = np.concatenate([np.array([1, 0, 0]), transformation[:3, 0]])
v = np.concatenate([np.array([0, 1, 0]), transformation[:3, 1]])
w = np.concatenate([np.array([0, 0, 1]), transformation[:3, 2]])
# Color(RGB) for 6 arrows, original X, Y, Z and then transformed X, Y, Z
red = np.array([1, 0, 0])
green = | np.array([0, 1, 0]) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
import os
import properties
import discretize
from discretize.utils import closestPoints
from SimPEG.utils import setKwargs
from SimPEG.electromagnetics import frequency_domain as fdem
from SimPEG.electromagnetics import time_domain as tdem
from .base import LoadableInstance, BaseCasing
from . import model
from .mesh import BaseMeshGenerator
from .info import __version__
class BaseCasingSrc(BaseCasing):
"""
The base class for sources. Inherit this to attach properties.
"""
filename = properties.String(
"filename to serialize properties to",
default="Source.json"
)
modelParameters = LoadableInstance(
"casing parameters",
model.Wholespace
)
meshGenerator = LoadableInstance(
"mesh generator instance",
BaseMeshGenerator
)
physics = properties.StringChoice(
"fdem or tdem simulation?",
choices=["fdem", "tdem"],
required=False
)
src_a = properties.Array(
"A electrode location"
)
src_b = properties.Array(
"B electrode location"
)
def __init__(self, **kwargs):
setKwargs(self, **kwargs)
if self.src_a is None:
self.src_a = self.modelParameters.src_a
if self.src_b is None:
self.src_b = self.modelParameters.src_b
assert self.src_a[1] == self.src_b[1], (
'non y-axis aligned sources have not been implemented'
)
@property
def mesh(self):
"""
discretize mesh
"""
return self.meshGenerator.mesh
# @property
# def src_a(self):
# """
# location of the a-electrode
# """
# if getattr(self, '_src_a', None) is None:
# return self.modelParameters.src_a
# return self._src_a
# @src_a.setter
# def src_a(self, value):
# self._src_a = value
# @property
# def src_b(self):
# """
# location of the b-electrode
# """
# if getattr(self, '_src_b', None) is None:
# return self.modelParameters.src_b
# return self._src_b
# @src_b.setter
# def src_b(self, value):
# self._src_b = value
@property
def casing_a(self):
"""
inner radius of the casing
"""
return self.modelParameters.casing_a
@property
def freqs(self):
"""
frequencies to consider
"""
return self.modelParameters.freqs
@property
def srcList(self):
"""
Source List
"""
if getattr(self, '_srcList', None) is None:
if self.physics.lower() == "fdem":
srcList = [
fdem.sources.RawVec_e([], f, self.s_e.astype("complex"))
for f in self.freqs
]
elif self.physics == "tdem":
srcList = [tdem.sources.RawVec_Grounded([], self.s_e)]
self._srcList = srcList
return self._srcList
class HorizontalElectricDipole(BaseCasingSrc):
"""
A horizontal electric dipole
"""
def __init__(self, **kwargs):
super(HorizontalElectricDipole, self).__init__(**kwargs)
assert self.src_a[2] == self.src_b[2], (
'z locations must be the same for a HED'
)
@property
def src_a_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_a_closest', None) is None:
# find the z location of the closest face to the src
src_a_closest = (
self.mesh.gridFx[closestPoints(self.mesh, self.src_a, 'Fz'), :]
)
assert(len(src_a_closest) == 1), 'multiple source locs found'
self._src_a_closest = src_a_closest[0]
return self._src_a_closest
@property
def src_b_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_b_closest', None) is None:
# find the z location of the closest face to the src
src_b_closest = (
self.mesh.gridFx[closestPoints(self.mesh, self.src_b, 'Fz'), :]
)
assert(len(src_b_closest) == 1), 'multiple source locs found'
self._src_b_closest = src_b_closest[0]
return self._src_b_closest
@property
def surface_wire(self):
"""
Horizontal part of the wire that runs along the surface
(one cell above) from the center of the well to the return electrode
"""
if getattr(self, '_surface_wire', None) is None:
mesh = self.mesh
src_a = self.src_a
src_b = self.src_b
# horizontally directed wire
surface_wirex = (
(
mesh.gridFx[:, 0] <= np.max(
[self.src_a[0], self.src_b[0]]
)
) &
(
mesh.gridFx[:, 0] >= np.min(
[self.src_a[0], self.src_b[0]]
)
)
)
surface_wirez = (
(mesh.gridFx[:, 2] > src_b[2] - self.mesh.hz.min()/2.) &
(mesh.gridFx[:, 2] < src_b[2] + self.mesh.hz.min()/2.)
)
self._surface_wire = surface_wirex & surface_wirez
if getattr(mesh, 'isSymmetric', False) is False:
surface_wirey = (
(mesh.gridFx[:, 1] > src_b[1] - mesh.hy.min()/2.) &
(mesh.gridFx[:, 1] < src_b[1] + mesh.hy.min()/2.)
)
self._surface_wire = (
self._surface_wire & surface_wirey
)
return self._surface_wire
@property
def surface_wire_direction(self):
"""
direction of the source wire
"""
# todo: extend to the case where the wire is not along the x-axis
return [-1. if self.src_a[0] < self.src_b[0] else 1.][0]
@property
def s_e(self):
"""
electric source term used to build the right hand side of the maxwell
system
"""
if getattr(self, '_s_e', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
# horizontal part of wire along surface
s_x[self.surface_wire] = self.surface_wire_direction
# assemble the source (downhole grounded primary)
s_e = np.hstack([s_x, s_y, s_z])
self._s_e = s_e/self.mesh.area
# self._s_e = self.mesh.getFaceInnerProduct(invMat=True) * s_e
return self._s_e
def plot(self, ax=None):
"""
Plot the source.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
mesh = self.mesh
ax.plot(
mesh.gridFx[self.surface_wire, 0],
mesh.gridFx[self.surface_wire, 2], 'r{}'.format(
['<' if self.surface_wire_direction == -1. else '>'][0]
)
)
@properties.validator
def _check_wire(self):
"""
Make sure that each segment of the wire is only going through a
single face
.. todo:: check that
"""
# check the surface wire only has one y and one z location
surface_wire = self.mesh.gridFx[self.surface_wire, :]
assert len(np.unique(surface_wire[:, 1])) == 1, (
'the surface wire has more than one y-location'
)
assert len(np.unique(surface_wire[:, 2])) == 1, (
'the surface wire has more than one z-location'
)
class VerticalElectricDipole(BaseCasingSrc):
"""
A vertical electric dipole. It is not coupled to the casing
:param CasingSimulations.Model.CasingProperties modelParameters: a casing properties instance
:param discretize.BaseMesh mesh: a discretize mesh
"""
def __init__(self, **kwargs):
super(VerticalElectricDipole, self).__init__(**kwargs)
assert all(self.src_a[:2] == self.src_b[:2]), (
'src_a and src_b must have the same horizontal location'
)
@property
def src_a_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_a_closest', None) is None:
# find the z location of the closest face to the src
src_a_closest = (
self.mesh.gridFz[closestPoints(self.mesh, self.src_a, 'Fz'), :]
)
assert(len(src_a_closest) == 1), 'multiple source locs found'
self._src_a_closest = src_a_closest[0]
return self._src_a_closest
@property
def src_b_closest(self):
"""
closest face to where we want the return current electrode
"""
if getattr(self, '_src_b_closest', None) is None:
# find the z location of the closest face to the src
src_b_closest = (
self.mesh.gridFz[closestPoints(self.mesh, self.src_b, 'Fz'), :]
)
assert(len(src_b_closest) == 1), 'multiple source locs found'
self._src_b_closest = src_b_closest[0]
return self._src_b_closest
@property
def _wire_direction(self):
if self.src_a_closest[2] < self.src_b_closest[2]:
return -1
return 1
@property
def wire_in_borehole(self):
"""
        Indices of the vertically directed wire inside of the borehole. It goes
        through the center of the well
"""
if getattr(self, '_wire_in_borehole', None) is None:
mesh = self.mesh
src_a = self.src_a
src_b = self.src_b
wire_in_boreholex = (
(mesh.gridFz[:, 0] < self.src_a_closest[0] + mesh.hx.min()/2.) &
(mesh.gridFz[:, 0] > self.src_a_closest[0] - mesh.hx.min()/2.)
)
wire_in_boreholez = (
(
mesh.gridFz[:, 2] >=
np.min([src_a[2], src_b[2]]) - 0.5*mesh.hz.min()
) &
(
mesh.gridFz[:, 2] <=
np.max([src_a[2], src_b[2]]) + 0.5*mesh.hz.min()
)
)
self._wire_in_borehole = wire_in_boreholex & wire_in_boreholez
if getattr(mesh, 'isSymmetric', False) is False:
wire_in_boreholey = (
(mesh.gridFz[:, 1] > src_a[1] - mesh.hy.min()/2.) &
(mesh.gridFz[:, 1] < src_a[1] + mesh.hy.min()/2.)
)
self._wire_in_borehole = (
self._wire_in_borehole & wire_in_boreholey
)
return self._wire_in_borehole
@property
def s_e(self):
"""
Source List
"""
if getattr(self, '_s_e', None) is None:
# downhole source
s_x = np.zeros(self.mesh.vnF[0])
s_y = np.zeros(self.mesh.vnF[1])
s_z = np.zeros(self.mesh.vnF[2])
s_z[self.wire_in_borehole] = self._wire_direction # part of wire through borehole
# assemble the source (downhole grounded primary)
s_e = | np.hstack([s_x, s_y, s_z]) | numpy.hstack |
#!/usr/bin/env python3
"""
logistic regression
"""
import numpy as np
from loguru import logger
from scipy.optimize import minimize
from sklearn.utils.extmath import safe_sparse_dot
from scipy.special import logsumexp
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.linear_model import SGDClassifier
BATCH_SIZE = 32
# https://machinelearningmastery.com/implement-logistic-regression-stochastic-gradient-descent-scratch-python/
# https://github.com/iamkucuk/Logistic-Regression-With-Mini-Batch-Gradient-Descent/blob/master/logistic_regression_notebook.ipynb
# https://www.geeksforgeeks.org/ml-mini-batch-gradient-descent-with-python/
# http://www.oranlooney.com/post/ml-from-scratch-part-2-logistic-regression/
# https://stats.stackexchange.com/a/117928 - mini-batch vs batch vs epoch
# https://towardsdatascience.com/understanding-the-scaling-of-l%C2%B2-regularization-in-the-context-of-neural-networks-e3d25f8b50db
# https://github.com/sergei-bondarenko/machine-learning/blob/master/l2.ipynb
# https://github.com/ral99/SGDForLinearModels/blob/master/pysgd/linear_models.py
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Faster than norm(x) ** 2.
"""
x = np.ravel(x, order='K')
return np.dot(x, x)
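# equivalent to np.linalg.norm(x) ** 2, but avoids the square root / re-squaring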
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities."""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
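    # p now holds row-wise log-probabilities (a log-softmax); it is
    # exponentiated in place further below to recover class probabilities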
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities."""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)),
dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _logistic_regression_path(X, y, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
pos_class=None, coef=None):
# Preprocessing.
_, n_features = X.shape
print(X.shape, y.shape)
classes = np.unique(y)
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
sample_weight = | np.ones(X.shape[0]) | numpy.ones |
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#pip install tensorflow==2.3.1
#pip install tensorflow-quantum
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
# %matplotlib inline
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
qubit = cirq.GridQubit(0, 0)
# Define some circuits.
circuit1 = cirq.Circuit(cirq.X(qubit))
circuit2 = cirq.Circuit(cirq.H(qubit))
# Convert to a tensor.
input_circuit_tensor = tfq.convert_to_tensor([circuit1, circuit2])
# Define a circuit that we want to append
y_circuit = cirq.Circuit(cirq.Y(qubit))
# Instantiate our layer
y_appender = tfq.layers.AddCircuit()
# Run our circuit tensor through the layer and save the output.
output_circuit_tensor = y_appender(input_circuit_tensor, append=y_circuit)
print(tfq.from_tensor(input_circuit_tensor))
print(tfq.from_tensor(output_circuit_tensor))
def generate_data(qubits):
"""Generate training and testing data."""
n_rounds = 20 # Produces n_rounds * n_qubits datapoints.
excitations = []
labels = []
for n in range(n_rounds):
for bit in qubits:
rng = np.random.uniform(-np.pi, np.pi)
excitations.append(cirq.Circuit(cirq.rx(rng)(bit)))
labels.append(1 if (-np.pi / 2) <= rng <= (np.pi / 2) else -1)
split_ind = int(len(excitations) * 0.7)
train_excitations = excitations[:split_ind]
test_excitations = excitations[split_ind:]
train_labels = labels[:split_ind]
test_labels = labels[split_ind:]
return tfq.convert_to_tensor(train_excitations), np.array(train_labels), \
tfq.convert_to_tensor(test_excitations), | np.array(test_labels) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = | np.max(X[:, 0]) | numpy.max |
from typing import Callable
import numpy
na = numpy.newaxis
def calc_m_sq_sin_sq_para(tensor_sigma, flag_tensor_sigma: bool = False):
"""Calculate the term P1 for paramagnetic sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
sigma_11 = tensor_sigma[0]
sigma_12 = tensor_sigma[1]
sigma_22 = tensor_sigma[4]
p_1 = 0.5*(numpy.square(numpy.abs(sigma_11)) + numpy.square(numpy.abs(sigma_22))) + \
numpy.square(numpy.abs(sigma_12))
dder = {}
if flag_tensor_sigma:
ones = numpy.ones_like(sigma_11.real)
dder_sigma_11_real = sigma_11.real * ones
dder_sigma_11_imag = sigma_11.imag * ones
dder_sigma_22_real = sigma_22.real * ones
dder_sigma_22_imag = sigma_22.imag * ones
dder_sigma_12_real = 2*sigma_12.real * ones
dder_sigma_12_imag = 2*sigma_12.imag * ones
zeros = numpy.zeros_like(dder_sigma_11_real)
dder["tensor_sigma_real"] = numpy.stack([
dder_sigma_11_real, dder_sigma_12_real, zeros,
zeros, dder_sigma_22_real, zeros, zeros, zeros, zeros], axis=0)
dder["tensor_sigma_imag"] = numpy.stack([
dder_sigma_11_imag, dder_sigma_12_imag, zeros,
zeros, dder_sigma_22_imag, zeros, zeros, zeros, zeros], axis=0)
return p_1, dder
def calc_m_sq_cos_sq_para(tensor_sigma, flag_tensor_sigma: bool = False):
"""Calculate the term P2 for paramagnetic sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
sigma_13 = tensor_sigma[2]
sigma_23 = tensor_sigma[5]
p_2 = numpy.square(numpy.abs(sigma_13)) + numpy.square(numpy.abs(sigma_23))
dder = {}
if flag_tensor_sigma:
ones = numpy.ones_like(sigma_13.real)
dder_sigma_13_real = 2*sigma_13.real * ones
dder_sigma_13_imag = 2*sigma_13.imag * ones
dder_sigma_23_real = 2*sigma_23.real * ones
dder_sigma_23_imag = 2*sigma_23.imag * ones
zeros = numpy.zeros_like(dder_sigma_13_real)
dder["tensor_sigma_real"] = numpy.stack([
zeros, zeros, dder_sigma_13_real,
zeros, zeros, dder_sigma_23_real, zeros, zeros, zeros], axis=0)
dder["tensor_sigma_imag"] = numpy.stack([
zeros, zeros, dder_sigma_13_imag,
zeros, zeros, dder_sigma_23_imag, zeros, zeros, zeros], axis=0)
return p_2, dder
def calc_cross_term_para(f_nucl, tensor_sigma, flag_f_nucl: bool = False, flag_tensor_sigma: bool = False):
"""Calculate the term P3 for paramagnetic sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
sigma_11 = tensor_sigma[0]
sigma_22 = tensor_sigma[4]
p_3 = f_nucl.real * (sigma_11.real + sigma_22.real) + f_nucl.imag * (sigma_11.imag + sigma_22.imag)
dder = {}
if flag_f_nucl:
dder["f_nucl_real"] = (sigma_11.real + sigma_22.real)*numpy.ones_like(f_nucl.real)
dder["f_nucl_imag"] = (sigma_11.imag + sigma_22.imag)*numpy.ones_like(f_nucl.imag)
if flag_tensor_sigma:
ones = numpy.ones_like(sigma_11.real)
dder_sigma_11_real = f_nucl.real * ones
dder_sigma_11_imag = f_nucl.imag * ones
dder_sigma_22_real = f_nucl.real * ones
dder_sigma_22_imag = f_nucl.imag * ones
zeros = numpy.zeros_like(dder_sigma_11_real)
dder["tensor_sigma_real"] = numpy.stack([
dder_sigma_11_real, zeros, zeros,
zeros, dder_sigma_22_real, zeros, zeros, zeros, zeros], axis=0)
dder["tensor_sigma_imag"] = numpy.stack([
dder_sigma_11_imag, zeros, zeros,
zeros, dder_sigma_22_imag, zeros, zeros, zeros, zeros], axis=0)
return p_3, dder
def calc_chiral_term_cos_sin_sq_para(tensor_sigma, flag_tensor_sigma: bool = False):
"""Calculate the term P4 for paramagnetic sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
sigma_11 = tensor_sigma[0]
sigma_12 = tensor_sigma[1]
sigma_21 = tensor_sigma[3]
sigma_22 = tensor_sigma[4]
p_4 = sigma_21.real*sigma_11.imag - sigma_21.imag*sigma_11.real + \
sigma_22.real*sigma_12.imag - sigma_22.imag*sigma_12.real
dder = {}
if flag_tensor_sigma:
ones = numpy.ones_like(sigma_11.real)
dder_sigma_11_real = - sigma_21.imag * ones
dder_sigma_11_imag = sigma_21.real * ones
dder_sigma_12_real = - sigma_22.imag * ones
dder_sigma_12_imag = sigma_22.real * ones
dder_sigma_21_real = sigma_11.imag * ones
dder_sigma_21_imag = - sigma_11.real * ones
dder_sigma_22_real = sigma_12.imag * ones
dder_sigma_22_imag = - sigma_12.real * ones
zeros = numpy.zeros_like(dder_sigma_11_real)
dder["tensor_sigma_real"] = numpy.stack([
dder_sigma_11_real, dder_sigma_12_real, zeros,
dder_sigma_21_real, dder_sigma_22_real, zeros, zeros, zeros, zeros], axis=0)
dder["tensor_sigma_imag"] = numpy.stack([
dder_sigma_11_imag, dder_sigma_12_imag, zeros,
dder_sigma_21_imag, dder_sigma_22_imag, zeros, zeros, zeros, zeros], axis=0)
return p_4, dder
def calc_chiral_term_cos_cube_para(tensor_sigma, flag_tensor_sigma: bool = False):
"""Calculate the term P5 for paramagnetic sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
sigma_13 = tensor_sigma[2]
sigma_23 = tensor_sigma[5]
p_5 = 2*(sigma_23.real*sigma_13.imag - sigma_23.imag*sigma_13.real)
dder = {}
if flag_tensor_sigma:
ones = numpy.ones_like(sigma_13.real)
dder_sigma_13_real = -2 * sigma_23.imag * ones
dder_sigma_13_imag = 2 * sigma_23.real * ones
dder_sigma_23_real = 2 * sigma_13.imag * ones
dder_sigma_23_imag = -2 * sigma_13.real * ones
zeros = numpy.zeros_like(dder_sigma_13_real)
dder["tensor_sigma_real"] = numpy.stack([
zeros, zeros, dder_sigma_13_real,
zeros, zeros, dder_sigma_23_real, zeros, zeros, zeros], axis=0)
dder["tensor_sigma_imag"] = numpy.stack([
zeros, zeros, dder_sigma_13_imag,
zeros, zeros, dder_sigma_23_imag, zeros, zeros, zeros], axis=0)
return p_5, dder
def calc_cross_term_ordered(f_nucl, f_m_perp, flag_f_nucl: bool = False, flag_f_m_perp: bool = False):
"""Calculate the term O1 for the magnetically ordered sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
f_m_perp_z = f_m_perp[2]
o_1 = 2*(f_nucl.real*f_m_perp_z.real + f_nucl.imag*f_m_perp_z.imag)
dder = {}
if flag_f_nucl:
dder["f_nucl_real"] = 2 * f_m_perp_z.real * numpy.ones_like(f_nucl.real)
dder["f_nucl_imag"] = 2 * f_m_perp_z.imag * numpy.ones_like(f_nucl.imag)
if flag_f_m_perp:
dder_f_m_perp_z_real = 2 * f_nucl.real * numpy.ones_like(f_m_perp_z.real)
dder_f_m_perp_z_imag = 2 * f_nucl.imag * numpy.ones_like(f_m_perp_z.imag)
zeros = numpy.zeros_like(dder_f_m_perp_z_real)
dder["f_m_perp_real"] = numpy.stack([zeros, zeros, dder_f_m_perp_z_real], axis=0)
dder["f_m_perp_imag"] = numpy.stack([zeros, zeros, dder_f_m_perp_z_imag], axis=0)
return o_1, dder
def calc_chiral_term_ordered(f_m_perp, flag_f_m_perp: bool = False):
"""Calculate the term O2 for the magnetically ordered sublattice.
For details see documentation "Integrated intensity from powder diffraction".
"""
f_m_perp_x = f_m_perp[0]
f_m_perp_y = f_m_perp[1]
o_2 = 2*(f_m_perp_y.real*f_m_perp_x.imag - f_m_perp_y.imag*f_m_perp_x.real)
dder = {}
if flag_f_m_perp:
dder_f_m_perp_x_real = -2 * f_m_perp_y.imag * numpy.ones_like(f_m_perp_x.real)
dder_f_m_perp_x_imag = 2 * f_m_perp_y.real * numpy.ones_like(f_m_perp_x.imag)
dder_f_m_perp_y_real = 2 * f_m_perp_x.imag * numpy.ones_like(f_m_perp_y.real)
dder_f_m_perp_y_imag = -2 * f_m_perp_x.real * numpy.ones_like(f_m_perp_y.imag)
zeros = numpy.zeros_like(dder_f_m_perp_x_real)
dder["f_m_perp_real"] = numpy.stack([dder_f_m_perp_x_real, dder_f_m_perp_y_real, zeros], axis=0)
dder["f_m_perp_imag"] = numpy.stack([dder_f_m_perp_x_imag, dder_f_m_perp_y_imag, zeros], axis=0)
return o_2, dder
def calc_m_sq_mix(tensor_sigma, f_m_perp, flag_tensor_sigma: bool = False, flag_f_m_perp: bool = False):
"""Calculate the term M1 for the case of coexistiong paramatic and magnetically ordered sublattices.
For details see documentation "Integrated intensity from powder diffraction".
    tensor_sigma describes the paramagnetic sublattice
    f_m_perp describes the ordered sublattice
"""
sigma_13 = tensor_sigma[2]
sigma_23 = tensor_sigma[5]
f_m_perp_x = f_m_perp[0]
f_m_perp_y = f_m_perp[1]
m_1 = 2*(sigma_13.real*f_m_perp_x.real + sigma_13.imag*f_m_perp_x.imag +
sigma_23.real*f_m_perp_y.real + sigma_23.imag*f_m_perp_y.imag)
dder = {}
if flag_tensor_sigma:
dder_sigma_13_real = 2*f_m_perp_x.real*numpy.ones_like(sigma_13.real)
dder_sigma_13_imag = 2*f_m_perp_x.imag*numpy.ones_like(sigma_13.imag)
dder_sigma_23_real = 2*f_m_perp_y.real*numpy.ones_like(sigma_23.real)
dder_sigma_23_imag = 2*f_m_perp_y.imag*numpy.ones_like(sigma_23.imag)
zeros = numpy.zeros_like(dder_sigma_13_real)
dder["tensor_sigma_real"] = numpy.stack([
zeros, zeros, dder_sigma_13_real,
zeros, zeros, dder_sigma_23_real,
zeros, zeros, zeros], axis=0)
dder["tensor_sigma_imag"] = numpy.stack([
zeros, zeros, dder_sigma_13_imag,
zeros, zeros, dder_sigma_23_imag,
zeros, zeros, zeros], axis=0)
if flag_f_m_perp:
dder_f_m_perp_x_real = 2 * sigma_13.real * numpy.ones_like(f_m_perp_x.real)
dder_f_m_perp_x_imag = 2 * sigma_13.imag * numpy.ones_like(f_m_perp_x.imag)
dder_f_m_perp_y_real = 2 * sigma_23.real * numpy.ones_like(f_m_perp_y.real)
        dder_f_m_perp_y_imag = 2 * sigma_23.imag * numpy.ones_like(f_m_perp_y.imag)
zeros = numpy.zeros_like(dder_f_m_perp_x_real)
dder["f_m_perp_real"] = numpy.stack([dder_f_m_perp_x_real, dder_f_m_perp_y_real, zeros], axis=0)
dder["f_m_perp_imag"] = numpy.stack([dder_f_m_perp_x_imag, dder_f_m_perp_y_imag, zeros], axis=0)
return m_1, dder
def calc_chiral_term_sin_sq_mix(
tensor_sigma, f_m_perp, flag_tensor_sigma: bool = False, flag_f_m_perp: bool = False):
"""Calculate the term M2 for the case of coexistiong paramatic and magnetically ordered sublattices.
For details see documentation "Integrated intensity from powder diffraction".
    tensor_sigma describes the paramagnetic sublattice
    f_m_perp describes the ordered sublattice
"""
sigma_12 = tensor_sigma[1]
sigma_21 = tensor_sigma[3]
f_m_perp_z = f_m_perp[2]
m_2 = sigma_12.real*f_m_perp_z.imag - sigma_12.imag*f_m_perp_z.real + \
sigma_21.imag*f_m_perp_z.real - sigma_21.real*f_m_perp_z.imag
dder = {}
if flag_tensor_sigma:
dder_sigma_12_real = f_m_perp_z.imag*numpy.ones_like(sigma_12.real)
dder_sigma_12_imag = -f_m_perp_z.real*numpy.ones_like(sigma_12.imag)
dder_sigma_21_real = -f_m_perp_z.imag*numpy.ones_like(sigma_21.real)
dder_sigma_21_imag = f_m_perp_z.real* | numpy.ones_like(sigma_21.imag) | numpy.ones_like |
import gym
import numpy as np
from typing import Tuple, List
from mae_envs.wrappers.util import update_obs_space
from mujoco_worldgen.util.types import store_args
def get_all_integer_partitions(n, min_team_size=1, max_team_size=np.inf):
'''
Return a list of all integer partitions of n.
Args:
n (int): number of entities.
min_team_size (int): minimum number of entities in a partition
max_team_size (int): maximum number of entities in a partition
'''
if n <= max_team_size:
yield (n,)
for i in range(min_team_size, n // 2 + 1):
for p in get_all_integer_partitions(n - i, i, max_team_size):
yield (i,) + p
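# For example (order follows the recursion, not sorted):
#   list(get_all_integer_partitions(4))
#   -> [(4,), (1, 3), (1, 1, 2), (1, 1, 1, 1), (2, 2)]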
class RUSPGenerator:
'''
Helper class to generate the randomized uncertain relationship graph. Agents are first
partitioned into groups. Within each group we randomize the amount each agent shares
reward with everyone else in the group. We then sample independent noise such that each
    agent observes an independent noisy observation of the relationship graph.
Reward sharing values are sampled from a beta distribution with parameters alpha and beta. For
all results in the paper except where we experiment with team hardness, we set both
alpha and beta to 1.
    To compute the noise added to the relationship graph, we first sample the noise level (the standard deviation
    of a Gaussian) from a uniform distribution, independently per relationship, per agent.
    We then sample a single value from this Gaussian (with the sampled standard deviation) centered around the true value.
Args:
min_team_size (int): minimum size of a group of agents with non-zero reward sharing amounts
max_team_size (int): maximum size of a group of agents with non-zero reward sharing amounts
alpha (float): reward sharing beta distribution parameter
beta (float): reward sharing beta distribution parameter
        allow_diagonal_non_1 (bool): if True, diagonal elements of the reward sharing matrix (an agent's
            weight on its own reward) can be less than 1 (sampled from the same beta distribution as the
            other relationships)
obs_noise_std_range (tuple of float): Range (maximum and minimum) that noise standard deviation can be sampled
from.
'''
@store_args
def __init__(self, *,
# Prosociality Graph
min_team_size: int = 1,
max_team_size: int = 1,
alpha: float = 1.0,
beta: float = 1.0,
allow_diagonal_non_1: bool = True,
# Uncertainty
obs_noise_std_range: Tuple[float] = [0.0, 1.0],
**kwargs):
assert min_team_size >= 1
assert max_team_size >= 1
assert max_team_size >= min_team_size
assert alpha > 0
assert beta > 0
assert np.all(np.array(obs_noise_std_range) >= 0)
self.cached_partitions = {} # Keys are (n_agents, min_team_size, max_team_size)
def _partition_agents(self, n_agents, min_team_size, max_team_size):
'''
Return a random partition from the set of all integer partitions
'''
settings = (n_agents, min_team_size, max_team_size)
if settings not in self.cached_partitions:
self.cached_partitions[settings] = list(get_all_integer_partitions(n_agents, min_team_size, max_team_size))
all_partitions = self.cached_partitions[settings]
random_partitions = all_partitions[np.random.randint(len(all_partitions))]
return random_partitions
def _generate_social_preferences(self, n_agents):
'''
Generate the relationship graph (without uncertainty)
'''
# Generate random partitions
if self.max_team_size != self.min_team_size:
random_partitions = self._partition_agents(n_agents, self.min_team_size, self.max_team_size)
else:
random_partitions = np.random.randint(self.min_team_size, self.max_team_size + 1, (n_agents))
random_partitions = np.cumsum(random_partitions)
random_partitions = random_partitions[random_partitions <= n_agents]
random_partitions = np.concatenate([[0], random_partitions, [n_agents]])
# Convert random partitions into a block diagonal matrix
self.reward_xform_mat = np.zeros((n_agents, n_agents))
for i in range(len(random_partitions) - 1):
block = slice(random_partitions[i], random_partitions[i + 1])
self.reward_xform_mat[block, block] = 1
# Randomize reward sharing values in block diagonal matrix
self.reward_xform_mat *= np.random.beta(a=self.alpha, b=self.beta, size=(n_agents, n_agents))
# Make sure off-diagonal is symmetric
self.reward_xform_mat = np.tril(self.reward_xform_mat, -1) + np.tril(self.reward_xform_mat).T
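        # np.tril(., -1) keeps the strict lower triangle; adding np.tril(.).T
        # mirrors the lower triangle (and diagonal) into the upper half, so the
        # resulting sharing matrix is symmetric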
if not self.allow_diagonal_non_1:
np.fill_diagonal(self.reward_xform_mat, 1.0)
# Randomly shuffle agents so that agent indicies do not matter
random_shuffle_mat = | np.eye(n_agents) | numpy.eye |
import copy
import logging
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from utils.toolkit import tensor2numpy, accuracy
from scipy.spatial.distance import cdist
EPSILON = 1e-8
batch_size = 64
class BaseLearner(object):
def __init__(self, args):
self._cur_task = -1
self._known_classes = 0
self._total_classes = 0
self._network = None
self._old_network = None
self._data_memory, self._targets_memory = np.array([]), np.array([])
self.topk = 5
self._memory_size = args['memory_size']
self._memory_per_class = args['memory_per_class']
self._fixed_memory = args['fixed_memory']
self._device = args['device']
self._multiple_gpus = [args['device']]
@property
def exemplar_size(self):
assert len(self._data_memory) == len(self._targets_memory), 'Exemplar size error.'
return len(self._targets_memory)
@property
def samples_per_class(self):
if self._fixed_memory:
return self._memory_per_class
else:
assert self._total_classes != 0, 'Total classes is 0'
return (self._memory_size // self._total_classes)
@property
def feature_dim(self):
if isinstance(self._network, nn.DataParallel):
return self._network.module.feature_dim
else:
return self._network.feature_dim
def build_rehearsal_memory(self, data_manager, per_class):
if self._fixed_memory:
self._construct_exemplar_unified(data_manager, per_class)
else:
self._reduce_exemplar(data_manager, per_class)
self._construct_exemplar(data_manager, per_class)
def save_checkpoint(self, filename):
self._network.cpu()
save_dict = {
'tasks': self._cur_task,
'model_state_dict': self._network.state_dict(),
}
torch.save(save_dict, '{}_{}.pkl'.format(filename, self._cur_task))
def after_task(self):
pass
def _evaluate(self, y_pred, y_true):
ret = {}
grouped = accuracy(y_pred.T[0], y_true, self._known_classes)
ret['grouped'] = grouped
ret['top1'] = grouped['total']
ret['top{}'.format(self.topk)] = np.around((y_pred.T == np.tile(y_true, (self.topk, 1))).sum()*100/len(y_true),
decimals=2)
return ret
def eval_task(self):
y_pred, y_true = self._eval_cnn(self.test_loader)
cnn_accy = self._evaluate(y_pred, y_true)
if hasattr(self, '_class_means'):
y_pred, y_true = self._eval_nme(self.test_loader, self._class_means)
nme_accy = self._evaluate(y_pred, y_true)
else:
nme_accy = None
return cnn_accy, nme_accy
def incremental_train(self):
pass
def _train(self):
pass
def _get_memory(self):
if len(self._data_memory) == 0:
return None
else:
return (self._data_memory, self._targets_memory)
def _compute_accuracy(self, model, loader):
model.eval()
correct, total = 0, 0
for i, (_, inputs, targets) in enumerate(loader):
inputs = inputs.to(self._device)
with torch.no_grad():
outputs = model(inputs)['logits']
predicts = torch.max(outputs, dim=1)[1]
correct += (predicts.cpu() == targets).sum()
total += len(targets)
return np.around(tensor2numpy(correct)*100 / total, decimals=2)
def _eval_cnn(self, loader):
self._network.eval()
y_pred, y_true = [], []
for _, (_, inputs, targets) in enumerate(loader):
inputs = inputs.to(self._device)
with torch.no_grad():
outputs = self._network(inputs)['logits']
predicts = torch.topk(outputs, k=self.topk, dim=1, largest=True, sorted=True)[1] # [bs, topk]
y_pred.append(predicts.cpu().numpy())
y_true.append(targets.cpu().numpy())
return np.concatenate(y_pred), np.concatenate(y_true) # [N, topk]
def _eval_nme(self, loader, class_means):
self._network.eval()
vectors, y_true = self._extract_vectors(loader)
vectors = (vectors.T / (np.linalg.norm(vectors.T, axis=0) + EPSILON)).T
dists = cdist(class_means, vectors, 'sqeuclidean') # [nb_classes, N]
scores = dists.T # [N, nb_classes], choose the one with the smallest distance
return np.argsort(scores, axis=1)[:, :self.topk], y_true # [N, topk]
def _extract_vectors(self, loader):
self._network.eval()
vectors, targets = [], []
for _, _inputs, _targets in loader:
_targets = _targets.numpy()
if isinstance(self._network, nn.DataParallel):
_vectors = tensor2numpy(self._network.module.extract_vector(_inputs.to(self._device)))
else:
_vectors = tensor2numpy(self._network.extract_vector(_inputs.to(self._device)))
vectors.append(_vectors)
targets.append(_targets)
return np.concatenate(vectors), np.concatenate(targets)
def _reduce_exemplar(self, data_manager, m):
logging.info('Reducing exemplars...({} per classes)'.format(m))
dummy_data, dummy_targets = copy.deepcopy(self._data_memory), copy.deepcopy(self._targets_memory)
self._class_means = np.zeros((self._total_classes, self.feature_dim))
self._data_memory, self._targets_memory = np.array([]), np.array([])
for class_idx in range(self._known_classes):
mask = np.where(dummy_targets == class_idx)[0]
dd, dt = dummy_data[mask][:m], dummy_targets[mask][:m]
self._data_memory = np.concatenate((self._data_memory, dd)) if len(self._data_memory) != 0 else dd
self._targets_memory = np.concatenate((self._targets_memory, dt)) if len(self._targets_memory) != 0 else dt
# Exemplar mean
idx_dataset = data_manager.get_dataset([], source='train', mode='test', appendent=(dd, dt))
idx_loader = DataLoader(idx_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
vectors, _ = self._extract_vectors(idx_loader)
vectors = (vectors.T / (np.linalg.norm(vectors.T, axis=0) + EPSILON)).T
mean = np.mean(vectors, axis=0)
mean = mean / np.linalg.norm(mean)
self._class_means[class_idx, :] = mean
def _construct_exemplar(self, data_manager, m):
logging.info('Constructing exemplars...({} per classes)'.format(m))
for class_idx in range(self._known_classes, self._total_classes):
data, targets, idx_dataset = data_manager.get_dataset(np.arange(class_idx, class_idx+1), source='train',
mode='test', ret_data=True)
idx_loader = DataLoader(idx_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
vectors, _ = self._extract_vectors(idx_loader)
vectors = (vectors.T / (np.linalg.norm(vectors.T, axis=0) + EPSILON)).T
class_mean = np.mean(vectors, axis=0)
# Select
selected_exemplars = []
exemplar_vectors = [] # [n, feature_dim]
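            # iCaRL-style herding: at each step greedily pick the sample whose
            # inclusion keeps the mean of the selected exemplars closest to the
            # true class mean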
for k in range(1, m+1):
S = np.sum(exemplar_vectors, axis=0) # [feature_dim] sum of selected exemplars vectors
mu_p = (vectors + S) / k # [n, feature_dim] sum to all vectors
i = np.argmin(np.sqrt(np.sum((class_mean - mu_p) ** 2, axis=1)))
selected_exemplars.append(np.array(data[i])) # New object to avoid passing by inference
exemplar_vectors.append(np.array(vectors[i])) # New object to avoid passing by inference
vectors = np.delete(vectors, i, axis=0) # Remove it to avoid duplicative selection
data = np.delete(data, i, axis=0) # Remove it to avoid duplicative selection
# uniques = np.unique(selected_exemplars, axis=0)
# print('Unique elements: {}'.format(len(uniques)))
selected_exemplars = np.array(selected_exemplars)
exemplar_targets = np.full(m, class_idx)
self._data_memory = np.concatenate((self._data_memory, selected_exemplars)) if len(self._data_memory) != 0 \
else selected_exemplars
self._targets_memory = np.concatenate((self._targets_memory, exemplar_targets)) if \
len(self._targets_memory) != 0 else exemplar_targets
# Exemplar mean
idx_dataset = data_manager.get_dataset([], source='train', mode='test',
appendent=(selected_exemplars, exemplar_targets))
idx_loader = DataLoader(idx_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
vectors, _ = self._extract_vectors(idx_loader)
vectors = (vectors.T / (np.linalg.norm(vectors.T, axis=0) + EPSILON)).T
mean = np.mean(vectors, axis=0)
mean = mean / np.linalg.norm(mean)
self._class_means[class_idx, :] = mean
def _construct_exemplar_unified(self, data_manager, m):
logging.info('Constructing exemplars for new classes...({} per classes)'.format(m))
_class_means = np.zeros((self._total_classes, self.feature_dim))
# Calculate the means of old classes with newly trained network
for class_idx in range(self._known_classes):
mask = np.where(self._targets_memory == class_idx)[0]
class_data, class_targets = self._data_memory[mask], self._targets_memory[mask]
class_dset = data_manager.get_dataset([], source='train', mode='test',
appendent=(class_data, class_targets))
class_loader = DataLoader(class_dset, batch_size=batch_size, shuffle=False, num_workers=4)
vectors, _ = self._extract_vectors(class_loader)
vectors = (vectors.T / (np.linalg.norm(vectors.T, axis=0) + EPSILON)).T
mean = np.mean(vectors, axis=0)
mean = mean / np.linalg.norm(mean)
_class_means[class_idx, :] = mean
# Construct exemplars for new classes and calculate the means
for class_idx in range(self._known_classes, self._total_classes):
data, targets, class_dset = data_manager.get_dataset(np.arange(class_idx, class_idx+1), source='train',
mode='test', ret_data=True)
class_loader = DataLoader(class_dset, batch_size=batch_size, shuffle=False, num_workers=4)
vectors, _ = self._extract_vectors(class_loader)
vectors = (vectors.T / ( | np.linalg.norm(vectors.T, axis=0) | numpy.linalg.norm |
"""
csalt_models.py
Usage:
- import modules
Outputs:
- various
"""
import os, sys
import numpy as np
from astropy.io import fits
from vis_sample import vis_sample
from scipy.ndimage import convolve1d
from scipy.interpolate import interp1d
from vis_sample.classes import *
from simple_disk import simple_disk
import const as const
import matplotlib.pyplot as plt
def cube_parser(pars, FOV=8, Npix=128, dist=150, r_min=0, r_max=500, r0=10,
RA=240, DEC=-40, restfreq=230.538e9, Vsys=0, vel=None,
datafile=None, outfile=None):
### Generate a model disk
disk = simple_disk(pars[0], pars[1], x0=0, y0=0, dist=dist, mstar=pars[2],
r_min=r_min, r_max=r_max, r0=r0, r_l=pars[3],
z0=pars[4], zpsi=pars[5], zphi=np.inf,
Tb0=pars[6], Tbq=pars[7], Tbeps=np.inf, Tbmax=1000,
Tbmax_b=pars[8], tau0=1000, tauq=0, taueta=np.inf,
taumax=5000, dV0=pars[9], dVq=0.5*pars[7], dVmax=1000,
FOV=FOV, Npix=Npix)
### Set velocities for cube (either use the channels in an already-existing
### cube from a .FITS file, or use the provided values)
if datafile is not None:
hd = fits.open(datafile)[0].header
f0, ix, nf, df = hd['CRVAL4'], hd['CRPIX4'], hd['NAXIS4'], hd['CDELT4']
freqs = f0 + (np.arange(nf) - ix + 1) * df
vel = const.c_ * (1 - freqs / restfreq)
else:
freqs = restfreq * (1 - vel / const.c_)
# adjust for systemic velocity
vlsr = vel - Vsys
### Generate the spectral line cube
cube = disk.get_cube(vlsr)
# convert from brightness temperatures to Jy / pixel
pixel_area = (disk.cell_sky * np.pi / (180 * 3600))**2
for i in range(len(freqs)):
cube[i,:,:] *= 1e26 * pixel_area * 2 * freqs[i]**2 * \
const.k_ / const.c_**2
### Prepare the output: either into the specified .FITS file or into a
### vis_sample "SKY OBJECT".
if outfile is not None:
hdu = fits.PrimaryHDU(cube[:,::-1,:])
header = hdu.header
# basic header inputs
header['EPOCH'] = 2000.
header['EQUINOX'] = 2000.
header['LATPOLE'] = -1.436915713634E+01
header['LONPOLE'] = 180.
# spatial coordinates
header['CTYPE1'] = 'RA---SIN'
header['CUNIT1'] = 'DEG'
header['CDELT1'] = -disk.cell_sky / 3600.
header['CRPIX1'] = 0.5 * disk.Npix + 0.5
header['CRVAL1'] = RA
header['CTYPE2'] = 'DEC--SIN'
header['CUNIT2'] = 'DEG'
header['CDELT2'] = disk.cell_sky / 3600.
header['CRPIX2'] = 0.5 * disk.Npix + 0.5
header['CRVAL2'] = DEC
# frequency coordinates
header['CTYPE3'] = 'FREQ'
header['CUNIT3'] = 'Hz'
header['CRPIX3'] = 1.
header['CDELT3'] = freqs[1]-freqs[0]
header['CRVAL3'] = freqs[0]
header['SPECSYS'] = 'LSRK'
header['VELREF'] = 257
# intensity units
header['BSCALE'] = 1.
header['BZERO'] = 0.
header['BUNIT'] = 'JY/PIXEL'
header['BTYPE'] = 'Intensity'
# output FITS
hdu.writeto(outfile, overwrite=True)
return cube[:,::-1,:]
# otherwise, return a vis_sample SkyObject
else:
# adjust cube formatting
mod_data = np.rollaxis(cube[:,::-1,:], 0, 3)
# spatial coordinates
npix_ra = disk.Npix
mid_pix_ra = 0.5 * disk.Npix + 0.5
delt_ra = -disk.cell_sky / 3600
if (delt_ra < 0):
mod_data = np.fliplr(mod_data)
mod_ra = (np.arange(npix_ra) - (mid_pix_ra-0.5))*np.abs(delt_ra)*3600
npix_dec = disk.Npix
mid_pix_dec = 0.5 * disk.Npix + 0.5
delt_dec = disk.cell_sky / 3600
if (delt_dec < 0):
mod_data = np.flipud(mod_data)
mod_dec = (np.arange(npix_dec)-(mid_pix_dec-0.5))*np.abs(delt_dec)*3600
# spectral coordinates
try:
nchan_freq = len(freqs)
mid_chan_freq = freqs[0]
mid_chan = 1
delt_freq = freqs[1] - freqs[0]
mod_freqs = (np.arange(nchan_freq)-(mid_chan-1))*delt_freq + \
mid_chan_freq
except:
mod_freqs = [0]
# return a vis_sample SkyImage object
return SkyImage(mod_data, mod_ra, mod_dec, mod_freqs, None)
def vismodel_full(pars, fixed, dataset,
chpad=3, oversample=None, noise_inject=None):
### - Prepare inputs
# Parse fixed parameters
restfreq, FOV, Npix, dist, rmax = fixed
npars = len(pars)
# Spatial frequencies to lambda units
uu = dataset.um * np.mean(dataset.nu_TOPO) / const.c_
vv = dataset.vm * np.mean(dataset.nu_TOPO) / const.c_
# Pad the frequency arrays
dnu_TOPO = np.diff(dataset.nu_TOPO)[0]
nu_TOPO_s = dataset.nu_TOPO[0] + dnu_TOPO * np.arange(-chpad, 0, 1)
nu_TOPO_f = dataset.nu_TOPO[-1] + dnu_TOPO * np.arange(1, chpad+1, 1)
dataset.nu_TOPO = np.concatenate((nu_TOPO_s, dataset.nu_TOPO, nu_TOPO_f))
dnu_LSRK = np.diff(dataset.nu_LSRK, axis=1)[:,0]
nu_LSRK_s = (dataset.nu_LSRK[:,0])[:,None] + \
dnu_LSRK[:,None] * np.arange(-chpad, 0, 1)[None,:]
nu_LSRK_f = (dataset.nu_LSRK[:,-1])[:,None] + \
dnu_LSRK[:,None] * np.arange(1, chpad+1, 1)[None,:]
dataset.nu_LSRK = np.concatenate((nu_LSRK_s, dataset.nu_LSRK, nu_LSRK_f),
axis=1)
# Upsample in the spectral domain (if necessary)
if oversample is not None:
nchan = dataset.nchan + 2 * chpad
nu_TOPO = np.interp(np.arange((nchan-1) * oversample + 1),
np.arange(0, nchan * oversample, oversample),
dataset.nu_TOPO)
nch = len(nu_TOPO)
nu_LSRK = np.empty((dataset.nstamps, nch))
for itime in range(dataset.nstamps):
nu_LSRK[itime,:] = np.interp(np.arange((nchan-1) * oversample + 1),
np.arange(0, nchan*oversample, oversample),
dataset.nu_LSRK[itime,:])
else:
nu_TOPO = dataset.nu_TOPO
nu_LSRK = dataset.nu_LSRK
nch = len(nu_TOPO)
oversample = 1
# LSRK velocities
v_LSRK = const.c_ * (1 - nu_LSRK / restfreq)
### - Configure noise (if necessary)
if noise_inject is not None:
# Scale input RMS for desired (naturally-weighted) noise per vis-chan
sigma_out = 1e-3 * noise_inject * np.sqrt(dataset.npol * dataset.nvis)
# Scale to account for spectral oversampling and SRF convolution
sigma_noise = sigma_out * np.sqrt(np.pi * oversample)
# Random Gaussian noise draws: note RE/IM separated for speed later
noise = np.random.normal(0, sigma_noise,
(dataset.npol, nch, dataset.nvis, 2))
noise = np.squeeze(noise)
### - Compute the model visibilities
# Loop through timestamps to get raw (sky) visibilities
mvis_pure = np.squeeze(np.empty((dataset.npol, nch, dataset.nvis, 2)))
for itime in range(dataset.nstamps):
# track the steps
print('timestamp '+str(itime+1)+' / '+str(dataset.nstamps))
# create a model cube
cube = cube_parser(pars[:npars-3], FOV=FOV, Npix=Npix, dist=dist,
r_max=rmax, Vsys=pars[10],
vel=v_LSRK[itime,:], restfreq=restfreq)
# indices for this timestamp only
ixl = np.min(np.where(dataset.tstamp == itime))
ixh = np.max(np.where(dataset.tstamp == itime)) + 1
# sample it's Fourier transform on the template (u,v) spacings
mvis = vis_sample(imagefile=cube, uu=uu[ixl:ixh], vv=vv[ixl:ixh],
mu_RA=pars[11], mu_DEC=pars[12], mod_interp=False).T
# populate the results in the output array *for this timestamp only*
mvis_pure[0,:,ixl:ixh,0] = mvis.real
mvis_pure[1,:,ixl:ixh,0] = mvis.real
mvis_pure[0,:,ixl:ixh,1] = mvis.imag
mvis_pure[1,:,ixl:ixh,1] = mvis.imag
# Convolve with the spectral response function
chix = np.arange(nch) / oversample
xch = chix - np.mean(chix)
SRF = 0.5 * np.sinc(xch) + 0.25 * np.sinc(xch-1) + 0.25 * np.sinc(xch+1)
mvis_pure = convolve1d(mvis_pure, SRF/np.sum(SRF), axis=1, mode='nearest')
# Return decimated visibilities, with noise if necessary
if noise_inject is None:
# Decimate and remove padding
mvis_pure = mvis_pure[:,::oversample,:,:]
mvis_pure = mvis_pure[:,chpad:-chpad,:,:]
# Convert to complex and return
return mvis_pure[:,:,:,0] + 1j * mvis_pure[:,:,:,1]
else:
# SRF convolution of noisy data
mvis_noisy = convolve1d(mvis_pure + noise, SRF/np.sum(SRF),
axis=1, mode='nearest')
# Decimate
mvis_pure = mvis_pure[:,::oversample,:,:]
mvis_pure = mvis_pure[:,chpad:-chpad,:,:]
mvis_noisy = mvis_noisy[:,::oversample,:,:]
mvis_noisy = mvis_noisy[:,chpad:-chpad,:,:]
# Convert to complex
mvis_pure = mvis_pure[:,:,:,0] + 1j * mvis_pure[:,:,:,1]
mvis_noisy = mvis_noisy[:,:,:,0] + 1j * mvis_noisy[:,:,:,1]
return mvis_pure, mvis_noisy
def vismodel_def(pars, fixed, dataset,
imethod='cubic', return_holders=False, chpad=3):
### - Prepare inputs
# Parse fixed parameters
restfreq, FOV, Npix, dist, rmax = fixed
npars = len(pars)
# Spatial frequencies to lambda units
uu = dataset.um * np.mean(dataset.nu_TOPO) / const.c_
vv = dataset.vm * np.mean(dataset.nu_TOPO) / const.c_
# Pad the frequency arrays
dnu_TOPO = np.diff(dataset.nu_TOPO)[0]
nu_TOPO_s = dataset.nu_TOPO[0] + dnu_TOPO * np.arange(-chpad, 0, 1)
nu_TOPO_f = dataset.nu_TOPO[-1] + dnu_TOPO * np.arange(1, chpad+1, 1)
nu_TOPO = np.concatenate((nu_TOPO_s, dataset.nu_TOPO, nu_TOPO_f))
dnu_LSRK = np.diff(dataset.nu_LSRK, axis=1)[:,0]
nu_LSRK_s = (dataset.nu_LSRK[:,0])[:,None] + \
dnu_LSRK[:,None] * np.arange(-chpad, 0, 1)[None,:]
nu_LSRK_f = (dataset.nu_LSRK[:,-1])[:,None] + \
dnu_LSRK[:,None] * | np.arange(1, chpad+1, 1) | numpy.arange |
import numpy as np
### MEASURES
# pureDP pure epsilon-DP
# approxDP approximate (epsilon, delta)-DP
# zCDP zero concentrated (xi, rho)-zCDP renyi divergence for all alpha
# smoothedzCDP approximate zero conc (xi, rho, delta)-zCDP the delta is equivalent to approxDP
# renyiDP renyi (alpha, epsilon')-RDP
### COMPOSITION
# composition_[measure]_[static|dynamic]_[homo|hetero]_[name]
# "static" when the choice of distances is fixed up-front
# "dynamic" when the choice of parameters is chosen adaptively
# "hetero" for heterogeneous, where each epsilon_i and delta_i may vary
# "homo" for homogeneous, where all k queries share the same `distance_0`.
# Omitted if a trivial simplification of heterogeneous composition
def composition_approxDP_static_hetero_basic(distance_is):
"""apply composition on `distance_is`, a list of individual distances
:param distance_is: a list of (epsilon, delta), or ndarray of shape [k, 2]
"""
epsilon_is, delta_is = zip(*distance_is)
return sum(epsilon_is), sum(delta_is)
def composition_approxDP_static_homo_advanced(distance_0, k, delta_p):
"""apply composition on `distance_0` in k-folds
"advanced" composition from Theorem 3.3 in https://guyrothblum.files.wordpress.com/2014/11/drv10.pdf
Sometimes also referred to as "strong" composition.
:param distance_0: per-query epsilon, delta
:param k: how many folds, number of queries
:param delta_p: how much additional delta to add, beyond basic composition of `delta_0`
:returns global (epsilon, delta) of k-fold composition of a (epsilon_0, delta_0)-DP mechanism
"""
epsilon_0, delta_0 = distance_0
epsilon_g = np.sqrt(2 * k * np.log(1 / delta_p)) * epsilon_0 + k * epsilon_0 * (
np.exp(epsilon_0) - 1
)
delta_g = delta_0 * k + delta_p
return epsilon_g, delta_g
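# Rough sanity check (illustrative numbers, values approximate):
#   composition_approxDP_static_homo_advanced((0.1, 1e-6), k=100, delta_p=1e-6)
#   -> epsilon_g ~= 5.26 + 1.05 ~= 6.31, delta_g = 1.01e-4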
def composition_approxDP_static_homo_optimal_analytic(distance_0, k, delta_p):
"""apply composition on `distance_0` in k-folds
"optimal" composition from KOV15
"analytic" because this is the looser closed form expression in Theorem 3.5: https://arxiv.org/pdf/1311.0776.pdf#subsection.3.3
:param distance_0: (epsilon, delta)
:param delta_p: p as in prime. Slack term for delta. Allows for nontrivial epsilon composition
"""
eps_0, del_0 = distance_0
bound1 = k * eps_0
    bound2 = k * eps_0**2 + eps_0 * np.sqrt(
        2 * k * np.log(np.exp(1) + np.sqrt(k * eps_0**2) / delta_p)
    )
bound3 = k * eps_0**2 + eps_0 * np.sqrt(2 * k * np.log(1 / delta_p))
# Corresponds to Theorem 3.5 in KOV15. Ignoring nan.
epsilon = np.nanmin([bound1, bound2, bound3])
delta = 1 - (1 - delta_p) * (1 - del_0) ** k
return epsilon, delta
def composition_approxDP_static_hetero_optimal_analytic(distance_is, delta_p):
"""Find the (epsilon, delta) composition of `distances_is`.
"optimal" composition from KOV15
"analytic" because this is the looser closed form expression in Theorem 3.5: https://arxiv.org/pdf/1311.0776.pdf#subsection.3.3
:param distance_is: a list of (epsilon, delta), or ndarray of shape [k, 2]
:param delta_p: slack term for delta. Allows for tighter composition on epsilons
"""
epsilon_is, delta_is = np.array(distance_is).T
sum_of_squares = (epsilon_is**2).sum()
first_term = sum(ep * (np.exp(ep) - 1) / (np.exp(ep) + 1) for ep in epsilon_is)
# want to find the smallest of three bounds
bound1 = sum(epsilon_is)
bound2 = first_term + np.sqrt(
(2 * np.log(np.exp(1) + (np.sqrt(sum_of_squares) / delta_p))) * sum_of_squares
)
bound3 = first_term + np.sqrt(2 * np.log(1 / delta_p) * sum_of_squares)
# Corresponds to Theorem 3.5 in KOV15. Ignoring nan.
epsilon = np.nanmin([bound1, bound2, bound3])
delta = 1 - (1 - delta_p) * | np.prod(1 - delta_is) | numpy.prod |
import numpy as np
from skimage.color.rgb_colors import *
from skimage import draw
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def angle_diff(a1, a2):
    a1 = a1 % 180
    a2 = a2 % 180
    d = abs(a1 - a2)
    return min(d, 180 - d)
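# e.g. angle_diff(10, 170) == 20, since line angles are compared modulo 180 degrees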
def line_coords(img, sym, angle_bins, drange=10, arange=2, num_lines=1):
img = img[:,:,0:3]
#sym = sym.copy()
lines = []
for i in range(num_lines):
r, t = np.unravel_index(np.argmax(sym), sym.shape)
#print(r,t)
#print('r = ', r, 't = ', t)
offset = sym.shape[0]/2
line = InfLine(r - offset, angle_bins[t], img)
lines.append(line)
dmin = np.clip(r - drange - 1, 0, sym.shape[0])
dmax = np.clip(r + drange + 1, 0, sym.shape[0])
amin = np.clip(t - arange - 1, 0, sym.shape[1])
amax = np.clip(t + arange + 1, 0, sym.shape[1])
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# x = np.arange(0, sym.shape[0])
# y = np.arange(0, sym.shape[1])
# xx, yy = np.meshgrid(y, x)
# surf = ax.plot_surface(xx, yy, sym, rstride=1, cstride=1, linewidth=0, antialiased=False)
# plt.show()
sym[dmin:dmax, amin:amax] = 0
return lines
def dist_point_line(x, y, x1, y1, x2, y2):
    # perpendicular distance from point (x, y) to the line through
    # (x1, y1) and (x2, y2)
    if y1 == y2:
        return abs(y - y1)
    elif x1 == x2:
        return abs(x - x1)
    else:
        # m = float(y2 - y1)/(x2 - x1)
        # c = y1 - m*x1
        # dist = (y - m*x - c)/np.sqrt(1 + m*m)
        num = (y2 - y1)*x - (x2 - x1)*y + x2*y1 - y2*x1
        den = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)
        return abs(num/den)
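# e.g. dist_point_line(0, 0, 1, 0, 0, 1) == 1/sqrt(2), the distance from the
# origin to the line through (1, 0) and (0, 1)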
class Line(object):
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.cx = (x1 + x2)/2
self.cy = (y1 + y2)/2
self.angle = np.arctan2(y1 - y2, x1 - x2)
self.angle = self.angle%np.pi
self.len = | np.hypot(y1 - y2, x1 - x2) | numpy.hypot |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from scipy.integrate import ode, odeint
import scipy.optimize as optimize
from ipydex import IPS
class Simulator(object):
"""
This class simulates the initial value problem that results from solving
the boundary value problem of the control system.
See __init__ for details.
"""
def __init__(self, ff, T, x_start, x_col_fnc, u_col_fnc, z_par=None, dt=0.01, mpc_flag=False):
"""
:param ff: vectorfield function
:param T: end Time
:param x_start: initial state
        :param x_col_fnc: state function x(t)
:param u_col_fnc: input function u(t)
:param dt:
"""
self.ff = ff
self.T = T
self.x_start = x_start
self.mpc_flag = mpc_flag
# x and u from collocation
self.x_col_fnc = x_col_fnc # ##:: self.eqs.trajectories.x
self.u_col_fnc = u_col_fnc # ##:: self.eqs.trajectories.u
self.dt = dt
# this is where the solutions go
self.xt = []
self.ut = []
self.nu = len(np.atleast_1d(self.u_col_fnc(0)))
# save optimal u values for each dt-step
self.mpc_cache = {}
# handle absence of additional free parameters
if z_par is None:
z_par = []
self.pt = z_par
# time steps
self.t = []
# get the values at t=0
self.xt.append(x_start)
self.ut.append(self.u_col_fnc(0.0)) ##:: array([ 0.])
self.t.append(0.0)
# initialise our ode solver
self.solver = ode(self.rhs)
self.solver.set_initial_value(x_start)
self.solver.set_integrator('vode', method='adams', rtol=1e-6)
# self.solver.set_integrator('lsoda', rtol=1e-6)
# self.solver.set_integrator('dop853', rtol=1e-6)
def calc_input(self, t):
if self.mpc_flag:
u = self.u_col_fnc(t) + self.mpc_corrector(t)
else:
u = self.u_col_fnc(t)
return u
def rhs(self, t, x):
"""
        Returns the right hand side (vector field) of the ode system.
"""
u = self.calc_input(t)
p = self.pt
dx = self.ff(x, u, t, p)
return dx
def calcstep(self):
"""
Calculates one step of the simulation.
"""
x = list(self.solver.integrate(self.solver.t + self.dt))
t = round(self.solver.t, 5)
if 0 <= t <= self.T:
self.xt.append(x)
self.ut.append(self.calc_input(t))
self.t.append(t)
return t, x
def simulate(self):
"""
Starts the simulation
Returns
-------
List of numpy arrays with time steps and simulation data of system and input variables.
"""
t = 0
while t <= self.T:
t, y = self.calcstep()
self.ut = np.array(self.ut).reshape(-1, self.nu)
return [np.array(self.t), np.array(self.xt), np.array(self.ut)]
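    # Illustrative usage (a sketch with assumed callables): simulate the scalar
    # integrator x' = u under the constant input u = 1.
    #
    #   ff = lambda x, u, t, p: np.atleast_1d(u)
    #   sim = Simulator(ff, T=1.0, x_start=[0.0],
    #                   x_col_fnc=lambda t: [t], u_col_fnc=lambda t: 1.0)
    #   t, xt, ut = sim.simulate()   # xt[-1] is close to [1.0]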
def mpc_corrector(self, t):
"""
calculate a (hopefully small) correction of the u-signal from collocation to force the
state x back to the reference (also from collocation).
Motivation: In the case of unstable systems error between x_col and x_sim grows
exponentially. This should be mitigated by adapting u appropriately
:param t:
:return:
"""
n_state = len(self.x_start)
n_input = len(self.u_col_fnc(0))
N = n_state
        u0 = np.zeros(N + 1)
import numpy as np
from numpy.testing._private.utils import assert_array_max_ulp
from scipy import integrate
import scipy.linalg
import scipy
from . import bibo
import matplotlib.pyplot as plt
class LTI():
"""main object
#dimension: ndim of state,input and output vector
Raises:
assert: [description]
ValueError: [description]
ValueError: [description]
Returns:
[type]: [description]
"""
bibo_result = {
-1 : "System is not stable",
0 : "Unable to conclude about system's stability",
1 : "System is stable"
}
def __init__(self,**kwargs):
"""constructor of LTI system. LTI has some follwing basic attributes:
Args:
expected keyword for constructor method
A : system matrix, if not provide, raise assert error
B : input matrix, if not provide, B is None
C : output matrix, if not provide, C is None
D : input matrix, if not provide, D is None
"""
assert "A" in kwargs, "matrix A must be provided"
A = kwargs.get('A')
B = kwargs.get('B')
C = kwargs.get('C')
D = kwargs.get('D')
        self.Nx = kwargs.get('Nx')
        self.Ny = kwargs.get('Ny')
for i in ['A','B','C','D']:
if kwargs.get(i) is not None:
assert isinstance(kwargs.get(i),np.ndarray), f"Invalid data type of {i}"
                assert kwargs.get(i).ndim==2, f'Invalid ndim of matrix {i}, {i}.ndim must be 2'
if B is not None:
assert A.shape[0] == A.shape[1] and A.shape[0] ==B.shape[0] , f'Invalid shape of matrix A,B, \n A.shape ={A.shape} and B.shape={B.shape}'
self._inputs_shape = B.shape[1]
self._A = A
self._B = B
self._C = C
self._D = D
self._states_shape = A.shape[0]
if C is not None:
self._outputs_shape = C.shape[0]
#input_function = kwargs.get('u')
#self._x0 = kwargs.get('x0')
#if self._x0 is not None:
# self._x0 = self._x0.reshape(-1,1)
self._max_step = kwargs.get('max_step')
@property
def states_shape(self,) -> int:
return self._states_shape
@property
def inputs_shape(self,) -> int:
if hasattr(self,'_inputs_shape'):
return self._inputs_shape
else:
return None
@property
def outputs_shape(self,) -> int:
if hasattr(self,'_outputs_shape'):
return self._outputs_shape
else:
return None
@property
def max_step(self):
return self._max_step
@property
def A(self,):
return self._A
@property
def B(self,):
return self._B
@property
def C(self,):
return self._C
@property
def D(self):
return self._D
@property
def dimension(self,) -> list:
"""An attributes of system
Returns:
list: got the length 3, dimention of
"""
return self.states_shape, self.inputs_shape, self.outputs_shape
def eigvals(self):
"""Compute the eigen values of system matrix (matrix A)
Returns:
[np.ndarray]: [1D array of eigvalues]
"""
return scipy.linalg.eigvals(self._A)
def is_stable(self,algorimth='hurwitz', **kwagrs) -> int:
"""[Compute the stability of system]
Args:
algorimth (str, optional): [select the algorithms to determine stability of system ]. Defaults to 'hurwitz'.
Returns:
int: 1 - if system is stable
0 - if selected algorithms can't conclude about stability of system
-1 - if system is unstable
"""
assert algorimth in ["gerschgorin","lyapunov" ,"hurwitz"], f"Invalid algorithm, must be \
in ['gerschgorin','lyapunov' ,'hurwitz']"
if algorimth=='gerschgorin': #Gerschgorin
std = bibo.Gerschgorin(self._A)
result = std.conclusion()
print(LTI.bibo_result[result])
return result
if algorimth=='lyapunov':
P = kwagrs.get('P')
Q = kwagrs.get('Q')
std = bibo.Lyapunov(A=self._A, P=P, Q=Q)
result = std.conclusion()
print(LTI.bibo_result[result])
return result
if algorimth=='hurwitz':
std = bibo.Hurwitz(A=self._A)
result = std.conclusion()
print(LTI.bibo_result[result])
return result
def is_controlable(self,algorimth='kalman', **kwagrs) -> bool:
"""Determine the controllability of system.
Args:
algorimth (str, optional): select the algorithms to determine controllability of system. Defaults to 'kalman'.
Raises:
ValueError: if the input matrix (matrix B) not found
Returns:
bool: True if system is controlalbe
"""
if self._B is None:
raise ValueError('please provide B matrix')
A = self._A
B = self._B
M = B
ndim = self._states_shape
if ndim==1:
if np.linalg.matrix_rank(B) == 1:
return True
else:
return False
X = A @ B
M = np.hstack([M,X])
for i in range(ndim-2):
X = A @ X
M = np.hstack([M,X])
if np.linalg.matrix_rank(M)==ndim:
return True
else:
return False
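    # Illustrative check (sketch): a double integrator driven by force is
    # controllable, since rank([B, AB]) == 2.
    #
    #   A = np.array([[0., 1.], [0., 0.]])
    #   B = np.array([[0.], [1.]])
    #   LTI(A=A, B=B).is_controlable()   # -> True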
def is_observable(self,algorimth='kalman') -> bool:
"""Determine the observability of system.
Args:
algorimth (str, optional): select the algorithms to determine observability of system. Defaults to 'kalman'.
Raises:
ValueError: if the output matrix (matrix C) not found
Returns:
bool: True is system is observable
"""
#assert self._C is not None, 'please fill matrix C to calculate observability'
if self._C is None:
raise ValueError('please provide C matrix')
A = self._A
C = self._C
M = C
ndim = self._states_shape
if ndim==1:
            if np.linalg.matrix_rank(C) == 1:
                return True
            else:
                return False
        # Kalman rank test: stack C, CA, CA^2, ... and check for full rank,
        # mirroring the controllability test above
        X = C @ A
        M = np.vstack([M, X])
        for i in range(ndim - 2):
            X = X @ A
            M = np.vstack([M, X])
        if np.linalg.matrix_rank(M) == ndim:
            return True
        else:
            return False
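    # Illustrative check (sketch): the same double integrator observed through
    # position alone is observable, since rank([C; CA]) == 2.
    #
    #   A = np.array([[0., 1.], [0., 0.]])
    #   C = np.array([[1., 0.]])
    #   LTI(A=A, C=C).is_observable()   # -> True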
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy as sp
from scipy.io.wavfile import read
def file_normalize(audio):
# return (audio / 128.) - 1
return audio
# Reads a WAV file and returns the sampling rate and audio as a numpy array
def file_read(filepath):
fs, x = read(filepath)
return fs, x
# Returns a 2-D array of the audio signal blocked according to block size and hop size
def block_audio(x, blockSize, hopSize, fs):
"""
Sample audio blocking code from Alex
"""
# allocate memory
numBlocks = int(np.ceil(x.size / hopSize))
xb = np.zeros([numBlocks, blockSize])
# compute time stamps
t = (np.arange(0, numBlocks) * hopSize) / fs
x = np.concatenate((x, np.zeros(blockSize)), axis=0)
for n in range(0, numBlocks):
i_start = n * hopSize
i_stop = np.min([x.size - 1, i_start + blockSize - 1])
xb[n][np.arange(0, blockSize)] = x[np.arange(i_start, i_stop + 1)]
return xb, t
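# Illustrative usage (sketch): a 1 s, 1 kHz tone blocked into 2048-sample
# frames with 50% overlap.
#
#   fs = 44100
#   x = np.sin(2 * np.pi * 1000 * np.arange(fs) / fs)
#   xb, t = block_audio(x, 2048, 1024, fs)   # xb.shape -> (44, 2048)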
# Apply Von-Hann window
def compute_hann(iWindowLength):
"""
Sample compute hann window code from Alex
"""
return 0.5 - (0.5 * np.cos(2 * np.pi / iWindowLength * np.arange(iWindowLength)))
# Computes the Short Time Fourier Transform
def compute_stft(xb, fs, block_size, hop_size):
numBlocks = xb.shape[0]
afWindow = compute_hann(xb.shape[1])
X = np.zeros([math.ceil(xb.shape[1] / 2 + 1), numBlocks])
for n in range(0, numBlocks):
# apply window
        tmp = np.abs(np.fft.fft(xb[n, :] * afWindow)) * 2 / xb.shape[1]
# compute magnitude spectrum
X[:, n] = tmp[range(math.ceil(tmp.size / 2 + 1))]
X[[0, math.ceil(tmp.size / 2)], n] = X[[0, math.ceil(tmp.size / 2)], n] / np.sqrt(2)
return X, fs
# Computes the Harmonic Product Spectrum from the DFT of the blocked audio signal
def HPS(X, fs, order):
freqRange = int((len(X[0]) - 1) / order)
# print len(X)
# print X.shape
f0 = np.zeros((1, len(X)))
hps = np.zeros((len(X), freqRange))
freqSpread = np.linspace(0, fs / 2, len(X[0]))
for h in range(len(X)):
for i in range(freqRange):
multiplier = 1
for j in range(1, order + 1):
multiplier = multiplier * (X[h, i * j])
hps[h, i] = multiplier
if max(hps[h, :]) > 10 ** 10:
hps[h, :] = hps[h, :] / max(hps[h, :])
return hps
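# Illustrative pipeline (a sketch; assumes HPS expects blocks along the first
# axis, hence the transpose, and 'example.wav' is a placeholder filename):
#
#   fs, x = file_read('example.wav')
#   xb, t = block_audio(x, 2048, 1024, fs)
#   X, fs = compute_stft(xb, fs, 2048, 1024)
#   hps = HPS(X.T, fs, order=4)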
# Computes the pitch class profile (chromagram) from the HPS obtained earlier
def extract_pitch_chroma(X, fs, tfInHz, baseline_ver = 1):
if baseline_ver == 1:
        Y = np.abs(X)
# -*- coding: utf-8 -*-
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2012
# All Rights Reserved.
# Contributor(s): ______________________________________.
################################################################
"""
This module provides various utility functions for radiometry calculations.
Functions are provided for a maximally flat spectral filter, a simple photon
detector spectral response, effective value calculation, conversion of spectral
domain variables between [um], [cm^-1] and [Hz], conversion of spectral
density quantities between [um], [cm^-1] and [Hz] and spectral convolution.
See the __main__ function for examples of use.
This package was partly developed to provide additional material in support of students
and readers of the book Electro-Optical System Analysis and Design: A Radiometry
Perspective, <NAME>, ISBN 9780819495693, SPIE Monograph Volume
PM236, SPIE Press, 2013. http://spie.org/x648.html?product_id=2021423&origin_id=x646
"""
__version__= "$Revision$"
__author__= 'pyradi team'
__all__= ['buildLogSpace','sfilter', 'responsivity', 'effectiveValue', 'convertSpectralDomain',
'convertSpectralDensity', 'convolve', 'savitzkyGolay1D','abshumidity', 'TFromAbshumidity',
'rangeEquation','_rangeEquationCalc','detectThresholdToNoiseTpFAR',
'detectSignalToNoiseThresholdToNoisePd',
'detectThresholdToNoiseSignalToNoisepD',
'detectProbabilityThresholdToNoiseSignalToNoise',
'detectFARThresholdToNoisepulseWidth', 'upMu',
'cart2polar', 'polar2cart','index_coords','framesFirst','framesLast',
'rect', 'circ','poissonarray','draw_siemens_star','drawCheckerboard',
'makemotionsequence','extractGraph','luminousEfficiency','Spectral',
'Atmo','Sensor','Target','calcMTFwavefrontError',
'polar2cartesian','warpPolarImageToCartesianImage','warpCartesianImageToPolarImage',
'intify_tuple','differcommonfiles','blurryextract','update_progress'
]
import sys
import numpy as np
from scipy import constants
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection
import os
import pkg_resources
from numbers import Number
if sys.version_info[0] > 2:
from io import StringIO
else:
from StringIO import StringIO
#################################################################################
"""
Gathers and presents version information.
Adapted from https://github.com/ahmedsalhin/version_information
This makes it much easier to determine which versions of modules
were installed in the source IPython interpreter's environment.
Produces output in:
* Plaintext (IPython [qt]console)
* HTML (IPython notebook, ``nbconvert --to html``, ``--to slides``)
* JSON (IPython notebook ``.ipynb`` files)
* LaTeX (e.g. ``ipython nbconvert example.ipynb --to LaTeX --post PDF``)
Usage
======
.. sourcecode:: ipython
print(ryutils.VersionInformation('matplotlib,numpy'))
"""
import html
import json
import sys
import time
import locale
import IPython
import platform
try:
import pkg_resources
except ImportError:
pkg_resources = None
timefmt = '%a %b %d %H:%M:%S %Y %Z'
def _date_format_encoding():
return locale.getlocale(locale.LC_TIME)[1] or locale.getpreferredencoding()
class VersionInformation():
def __init__(self,line=''):
self.version_information( line=line)
def version_information(self, line=''):
"""Show information about versions of modules.
Usage:
%version_information [optional comma-separated list of modules]
"""
self.packages = [
("Python", "{version} {arch} [{compiler}]".format(
version=platform.python_version(),
arch=platform.architecture()[0],
compiler=platform.python_compiler())),
("IPython", IPython.__version__),
("OS", platform.platform().replace('-', ' '))
]
modules = line.replace(' ', '').split(",")
for module in modules:
if len(module) > 0:
try:
code = ("import %s; version=str(%s.__version__)" %
(module, module))
ns_g = ns_l = {}
exec(compile(code, "<string>", "exec"), ns_g, ns_l)
self.packages.append((module, ns_l["version"]))
except Exception as e:
try:
if pkg_resources is None:
raise
version = pkg_resources.require(module)[0].version
self.packages.append((module, version))
except Exception as e:
self.packages.append((module, str(e)))
return self
def _repr_json_(self):
obj = {
'Software versions': [
{'module': name, 'version': version} for
(name, version) in self.packages]}
if IPython.version_info[0] >= 3:
return obj
else:
return json.dumps(obj)
    @staticmethod
    def _htmltable_escape(str_):
        return html.escape(str_)
def _repr_html_(self):
html_table = "<table>"
html_table += "<tr><th>Software</th><th>Version</th></tr>"
for name, version in self.packages:
_version = self._htmltable_escape(version)
html_table += "<tr><td>%s</td><td>%s</td></tr>" % (name, _version)
try:
html_table += "<tr><td colspan='2'>%s</td></tr>" % time.strftime(timefmt)
except:
html_table += "<tr><td colspan='2'>%s</td></tr>" % \
time.strftime(timefmt).decode(_date_format_encoding())
html_table += "</table>"
return html_table
@staticmethod
def _latex_escape(str_):
CHARS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\letteropenbrace{}',
'}': r'\letterclosebrace{}',
'~': r'\lettertilde{}',
'^': r'\letterhat{}',
'\\': r'\letterbackslash{}',
'>': r'\textgreater',
'<': r'\textless',
}
return u"".join([CHARS.get(c, c) for c in str_])
def _repr_latex_(self):
latex = r"\begin{tabular}{|l|l|}\hline" + "\n"
latex += r"{\bf Software} & {\bf Version} \\ \hline\hline" + "\n"
for name, version in self.packages:
_version = self._latex_escape(version)
latex += r"%s & %s \\ \hline" % (name, _version) + "\n"
try:
latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
time.strftime(timefmt) + "\n"
except:
latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
time.strftime(timefmt).decode(_date_format_encoding()) + "\n"
latex += r"\end{tabular}" + "\n"
return latex
    def _repr_pretty_(self, p, cycle):
        text = "Software versions\n"
        for name, version in self.packages:
            text += "%s %s\n" % (name, version)
        try:
            text += "%s" % time.strftime(timefmt)
        except:
            text += "%s" % \
                time.strftime(timefmt).decode(_date_format_encoding())
        p.text(text)
def __str__(self):
text = 'Software versions\n'
for name, version in self.packages:
text += f"{name}: {version}\n"
try:
text += f"{time.strftime(timefmt)}"
except:
text += f"{time.strftime(timefmt).decode(_date_format_encoding())}"
return text
##############################################################################
##
def buildLogSpace(Vmin,Vmax,nDec,patn=False):
"""Calculate a log space given low, high and number samples per decade
If patn is True, the upper limit is adjusted to obtain a
repeat numeric pattern in each dcade.
Args:
| Vmin (float) lower limit
| Vmax (float) upper limit
| nDec (int) number of points per decade
| patn (bool) repeat pattern in each decade
Returns:
| vector with equal spacing in log
Raises:
| No exception is raised.
"""
decs = int(np.log10(Vmax/Vmin))
if patn:
ful = np.log10(Vmax/Vmin)
upp = np.ceil(nDec *(ful - decs))
        num = int(np.ceil(decs * nDec + upp + 1))
Vmax = 10 ** (np.log10(Vmin) + ((num-1) / nDec))
else:
        num = int(np.ceil(decs * nDec))
return np.logspace(np.log10(Vmin),np.log10(Vmax),num)
##############################################################################
##
def update_progress(progress, bar_length=20):
"""Simple text-based progress bar for Jupyter notebooks.
Note that clear_output, and hence this function wipes the entire cell output,
including previous output and widgets.
Usage:
import pyradi.ryutils as ryutils
import time
print('before')
#Replace this with a real computation
number_of_elements = 100
for i in range(number_of_elements):
time.sleep(0.1)
# progress must be a float between 0 and 1
ryutils.update_progress((i+1) / number_of_elements,bar_length=40)
print('after')
source:
https://mikulskibartosz.name/how-to-display-a-progress-bar-in-jupyter-notebook-47bd4c2944bf
https://ipython.org/ipython-doc/3/api/generated/IPython.display.html#IPython.display.clear_output
Wait to clear the output until new output is available to replace it.
"""
from IPython.display import clear_output
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
##############################################################################
##
def solidAngleSquare(width,breadth,height,stype,numsamples):
"""Calculate the solid angle of a rectagular plate from a point on the normal at its centre
The solid angle of a rectangular flat surface, with dimensions $W$ and $D$, as seen from a
reference point centered above the surface, is determined by the integral of the projected
area of a small elemental area $\cos\theta\,dd\,dw$ across the full size of the surface:
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,\cos^{n-2}\theta}{R^2}
$$
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,\cos^n\theta}{H^2}
$$
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,}{H^2}\left(\frac{H}{R}\right)^n
$$
$$\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,}{H^2}\left(\frac{H}{\sqrt{w^2+d^2+H^2}}\right)^n,
$$
where $H$ is the reference point height above the surface, and $n=3$ for the geometrical solid angle
and $n=4$ for the projected solid angle. The integral is performed along the $W$ and $D$ dimensions
with increments of $dw$ and $dd$. The slant range between the reference point and the elemental area
$dd\times dw$ is $R=H/\cos\theta$.
Args:
| width (float): size along one edge of rectangle
| breadth (float): size along the second edge of rectangle
| height (float): distance along normal to the rect to reference point
| stype (str): type of solid angle can be one of ('g' or 'p') for ('geometric','projected')
| numsamples (int): number of samples along edges
Returns:
| solid angle (float) or None if incorrect type
Raises:
| No exception is raised.
"""
varx = np.linspace(-width/2, width/2, numsamples)
vary = np.linspace(-breadth/2, breadth/2, numsamples)
x, y = np.meshgrid(varx, vary)
if stype[0]=='g':
gv = (1. / ( (x / height) ** 2 + (y / height) ** 2 + 1 ) ) ** (3 / 2)
elif stype[0]=='p':
gv = (1. / ( (x / height) ** 2 + (y / height) ** 2 + 1 ) ) ** (4 / 2)
else:
return None
solidAngle = np.trapz(np.ravel(gv), dx=breadth*width/(numsamples**2))/(height*height)
return solidAngle
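# Illustrative check (sketch): for a 1 m x 1 m plate viewed on-axis from 100 m,
# the geometric solid angle approaches area/height**2 = 1e-4 sr.
#
#   solidAngleSquare(1.0, 1.0, 100.0, 'g', 101)   # ~ 1.0e-4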
##############################################################################
##
def intify_tuple(tup):
"""Make tuple entries int type
"""
tup_int = ()
for tup_ent in tup:
tup_int = tup_int + (int(tup_ent),)
return tup_int
##############################################################################
##
def framesFirst(imageSequence):
"""Image sequence with frames along axis=2 (last index), reordered such that
frames are along axis=0 (first index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=2 to an image sequence with frames along axis=0. The function
uses np.transpose(imageSequence, (2,0,1))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=2
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (2,0,1))
##############################################################################
##
def framesLast(imageSequence):
"""Image sequence with frames along axis=0 (first index), reordered such that
frames are along axis=2 (last index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=0 to an image sequence with frames along axis=2. The function
uses np.transpose(imageSequence, (1,2,0))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=0
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (1,2,0))
##############################################################################
##
def index_coords(data, origin=None, framesFirst=True):
"""Creates (x,y) zero-based coordinate arrrays for a numpy array indices, relative to some origin.
This function calculates two meshgrid arrays containing the coordinates of the
input array. The origin of the new coordinate system defaults to the
center of the image, unless the user supplies a new origin.
The data format can be data.shape = (rows, cols, frames) or
data.shape = (frames, rows, cols), the format of which is indicated by the
framesFirst parameter.
Args:
| data (np.array): array for which coordinates must be calculated.
| origin ( (x-orig, y-orig) ): data-coordinates of where origin should be
| framesFirst (bool): True if data.shape is (frames, rows, cols), False if
data.shape is (rows, cols, frames)
Returns:
| x (float np.array): x coordinates in array format.
| y (float np.array): y coordinates in array format.
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
if framesFirst:
ny, nx = data.shape[1:3]
else:
ny, nx = data.shape[:2]
if origin is None:
origin_x, origin_y = nx // 2, ny // 2
else:
origin_x, origin_y = origin
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x -= origin_x
y -= origin_y
return x, y
##############################################################################
##
def cart2polar(x, y):
"""Converts from cartesian to polar coordinates, given (x,y) to (r,theta).
Args:
| x (float np.array): x values in array format.
| y (float np.array): y values in array format.
Returns:
| r (float np.array): radial component for given (x,y).
| theta (float np.array): angular component for given (x,y).
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
##############################################################################
##
def polar2cart(r, theta):
"""Converts from polar to cartesian coordinates, given (r,theta) to (x,y).
Args:
| r (float np.array): radial values in array format.
| theta (float np.array): angular values in array format.
Returns:
| x (float np.array): x component for given (r, theta).
| y (float np.array): y component for given (r, theta).
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
##############################################################################
##
def upMu(uprightMu=True, textcomp=False):
"""Returns a LaTeX micron symbol, either an upright version or the normal symbol.
The upright symbol requires that the siunitx LaTeX package be installed on the
computer running the code. This function also changes the Matplotlib rcParams
file.
Args:
| uprightMu (bool): signals upright (True) or regular (False) symbol (optional).
| textcomp (bool): if True use the textcomp package, else use siunitx package (optional).
Returns:
| range (string): LaTeX code for the micro symbol.
Raises:
| No exception is raised.
"""
if sys.version_info[0] < 3:
if uprightMu:
from matplotlib import rc, font_manager
import matplotlib as mpl
rc('text', usetex=True)
# set up the use of external latex, fonts and packages
if not textcomp :
mpl.rcParams['text.latex.preamble'] = [
# r'\usepackage{siunitx}', # i need upright \micro symbols, but you need...
'\\usepackage{siunitx}', # i need upright \micro symbols, but you need...
'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
'\\usepackage{helvet}', # set the normal font here
'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet
'\\sansmath'] # <- tricky! -- gotta actually tell tex to use!
upmu = '\si{\micro}'
else:
mpl.rcParams['text.latex.preamble'] = [
'\\usepackage{textcomp}', # i need upright \micro symbols, but you need...
'\\usepackage{helvet}', # set the normal font here
'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet
'\\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
upmu = '\\textmu{}'
else:
upmu = '$\\mu$'
else:
upmu = '\u00B5'
return upmu
##############################################################################
##
def detectFARThresholdToNoisepulseWidth(ThresholdToNoise, pulseWidth):
""" Solve for the FAR, given the threshold to noise ratio and pulse width, for matched filter.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| ThresholdToNoise (float): the threshold to noise ratio.
| pulseWidth (float): the signal pulse width in [s].
Returns:
| FAR (float): the false alarm rate in [alarms/s]
Raises:
| No exception is raised.
"""
FAR = np.exp(- (ThresholdToNoise ** 2) / 2.) / (2. * pulseWidth * np.sqrt(3))
return FAR
##############################################################################
##
def detectThresholdToNoiseTpFAR(pulseWidth, FAR):
""" Solve for threshold to noise ratio, given pulse width and FAR, for matched filter.
Using the theory of matched filter design, calculate the
threshold to noise ratio, to achieve a required false alarm rate.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| pulseWidth (float): the signal pulse width in [s].
| FAR (float): the false alarm rate in [alarms/s]
Returns:
| range (float): threshold to noise ratio
Raises:
| No exception is raised.
"""
ThresholdToNoise = np.sqrt(-2 * np.log (2 * pulseWidth * np.sqrt(3) * FAR ))
return ThresholdToNoise
##############################################################################
##
def detectSignalToNoiseThresholdToNoisePd(ThresholdToNoise, pD):
""" Solve for the signal to noise ratio, given the threshold to noise ratio and
probability of detection.
Using the theory of matched filter design, calculate the
signal to noise ratio, to achieve a required probability of detection.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| ThresholdToNoise (float): the threshold to noise ratio [-]
| pD (float): the probability of detection [-]
Returns:
| range (float): signal to noise ratio
Raises:
| No exception is raised.
"""
import scipy.special
SignalToNoise = np.sqrt(2) * scipy.special.erfinv(2 * pD -1) + ThresholdToNoise
return SignalToNoise
##############################################################################
##
def detectThresholdToNoiseSignalToNoisepD(SignalToNoise, pD):
""" Solve for the threshold to noise ratio, given the signal to noise ratio and
probability of detection.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Pro-cessing, CRC Press, 2002
Args:
| SignalToNoise (float): the signal to noise ratio [-]
| pD (float): the probability of detection [-]
Returns:
| range (float): signal to noise ratio
Raises:
| No exception is raised.
"""
import scipy.special
ThresholdToNoise = SignalToNoise - np.sqrt(2) * scipy.special.erfinv(2 * pD -1)
return ThresholdToNoise
##############################################################################
##
def detectProbabilityThresholdToNoiseSignalToNoise(ThresholdToNoise, SignalToNoise):
""" Solve for the probability of detection, given the signal to noise ratio and
threshold to noise ratio
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Pro-cessing, CRC Press, 2002
Args:
| ThresholdToNoise (float): the threshold to noise ratio [-]
| SignalToNoise (float): the signal to noise ratio [-]
Returns:
| range (float): probability of detection
Raises:
| No exception is raised.
"""
import scipy.special
pD = 0.5 * (scipy.special.erf((SignalToNoise - ThresholdToNoise) / np.sqrt(2)) + 1)
return pD
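# Illustrative round trip (sketch): threshold-to-noise for a given false alarm
# rate, the SNR required for 99% detection at that threshold, and the
# recovered probability of detection.
#
#   TNR = detectThresholdToNoiseTpFAR(pulseWidth=100e-9, FAR=1.0)
#   SNR = detectSignalToNoiseThresholdToNoisePd(TNR, pD=0.99)
#   detectProbabilityThresholdToNoiseSignalToNoise(TNR, SNR)   # ~ 0.99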
##############################################################################
##
def rangeEquation(Intensity, Irradiance, rangeTab, tauTab, rangeGuess = 1, n = 2):
""" Solve the range equation for arbitrary transmittance vs range.
This function solve for the range :math:`R` in the range equation
.. math::
E = \\frac{I\\tau_a(R)}{R^n}
where :math:`E` is the threshold irradiance in [W/m2],
and :math:`I` is the intensity in [W/sr]. This range equation holds for
the case where the target is smaller than the field of view.
The range :math:`R` must be in [m], and :math:`\\tau_a(R)`
is calculated from a lookup table of atmospheric transmittance vs. range.
The transmittance lookup table can be calculated from the simple Bouguer law,
or it can have any arbitrary shape, provided it decreases with increasing range.
The user supplies the lookup table in the form of an array of range values and
an associated array of transmittance values. The range values need not be on
constant linear range increment.
The parameter :math:`n`
* :math:`n=2` (default value) the general case of a radiating source
smaller than the field of view.
* :math:`n=4` the special case of a laser range finder illuminating a target
smaller than the field of view, viewed against the sky. In this case there
is an :math:`R^2` attenuation from the laser to the source and another
:math:`R^2` attenuation from the source to the receiver, hence
:math:`R^4` overall.
If the range solution is doubtful (e.g. not a trustworthy solution) the
returned value is made negative.
Args:
| Intensity (float or np.array[N,] or [N,1]): in [W/sr].
| Irradiance (float or np.array[N,] or [N,1]): in [W/m2].
| rangeTab (np.array[N,] or [N,1]): range vector for tauTab lookup in [m]
| tauTab (np.array[N,] or [N,1]): transmittance vector for lookup in [m]
| rangeGuess (float): starting value range estimate in [m] (optional)
| n (float): range power (2 or 4) (optional)
Returns:
| range (float or np.array[N,] or [N,1]): Solution to the range equation in [m].
Value is negative if calculated range exceeds the top value in range table,
or if calculated range is too near the lower resolution limit.
Raises:
| No exception is raised.
"""
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
tauTable = interp1d(rangeTab, tauTab, kind = 'linear')
Range = fsolve(_rangeEquationCalc, rangeGuess,
args = (Intensity,Irradiance,tauTable,n,np.max(rangeTab),))
#near the bottom (minimum) range of the table
if(Range < rangeTab[2] ):
Range = - Range
# beyond the top of the range table
if(Range > rangeTab[-1] ):
Range = - Range
return Range
##############################################################################
##
def _rangeEquationCalc(r,i,e,tauTable,n,rMax):
if r > rMax:
return 0
return i * tauTable(r) / (r ** n) - e
##############################################################################
##
def TFromAbshumidity(AH, equationSelect = 1):
"""temperature in [K] between 248 K and 342 K, given atmopsheric absolute humidity [g/m3], assuming 100% RH
This function uses two similar equations, but with different constants.
Args:
| AH (float): absolute humidity in g/m3.
| equationSelect (int): select the equation to be used.
Returns:
| temperature (float): in K
Raises:
| No exception is raised.
"""
T = np.linspace(248., 342., 100 )
absLUT = abshumidity(T, equationSelect = equationSelect)
f = interpolate.interp1d(absLUT, T,bounds_error=True)
return f(AH)
##############################################################################
##
def abshumidity(T, equationSelect = 1):
""" Atmopsheric absolute humidity [g/m3] for temperature in [K] between 248 K and 342 K.
This function provides two similar equations, but with different constants.
Args:
| temperature (np.array[N,] or [N,1]): in [K].
| equationSelect (int): select the equation to be used.
Returns:
| absolute humidity (np.array[N,] or [N,1]): abs humidity in [g/m3]
Raises:
| No exception is raised.
"""
#there are two options, the fist one seems more accurate (relative to test set)
if equationSelect == 1:
#http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-D.pdf
return ( 1325.2520998 * 10 **(7.5892*(T - 273.15)/(T -32.44)))/T
else:
#http://www.see.ed.ac.uk/~shs/Climate%20change/Data%20sources/Humidity%20with%20altidude.pdf
return (1324.37872 * 2.718281828459046 **(17.67*(T - 273.16)/(T - 29.66)))/T
##############################################################################
##
def sfilter(spectral,center, width, exponent=6, taupass=1.0, \
taustop=0.0, filtertype = 'bandpass' ):
""" Calculate a symmetrical filter response of shape exp(-x^n)
Given a number of parameters, calculates maximally flat,
symmetrical transmittance. The function parameters controls
the width, pass-band and stop-band transmittance and sharpness
of cutoff. This function is not meant to replace the use of
properly measured filter responses, but rather serves as a
starting point if no other information is available.
This function does not calculate ripple in the pass-band
or cut-off band.
Filter types supported include band pass, high (long) pass and
low (short) pass filters. High pass filters have maximal
transmittance for all spectral values higher than the central
value. Low pass filters have maximal transmittance for all
spectral values lower than the central value.
Args:
| spectral (np.array[N,] or [N,1]): spectral vector in [um] or [cm-1].
| center (float): central value for filter passband
| width (float): proportional to width of filter passband
| exponent (float): even integer, define the sharpness of cutoff.
| If exponent=2 then gaussian
| If exponent=infinity then square
| taupass (float): the transmittance in the pass band (assumed constant)
| taustop (float): peak transmittance in the stop band (assumed constant)
| filtertype (string): filter type, one of 'bandpass', 'lowpass' or 'highpass'
Returns:
| transmittance (np.array[N,] or [N,1]): transmittances at "spectral" intervals.
Raises:
| No exception is raised.
| If an invalid filter type is specified, return None.
| If negative spectral is specified, return None.
"""
maxexp = np.log(sys.float_info.max)/np.log(np.max(2*np.abs(spectral-center)/width))
# minexp = np.log(sys.float_info.min)/np.log(np.min(2*(spectral-center)/width))
exponent = maxexp if exponent > maxexp else exponent
# exponent = minexp if exponent < minexp else exponent
tau = taustop+(taupass-taustop)*np.exp(-(2*np.abs(spectral-center)/width)**exponent)
maxtau=np.max(tau)
if filtertype == 'bandpass':
pass
elif filtertype == 'lowpass':
tau = tau * np.greater(spectral,center) + \
maxtau * np.ones(spectral.shape) * np.less(spectral,center)
elif filtertype == 'highpass':
tau = tau * np.less(spectral,center) + \
maxtau * np.ones(spectral.shape) * np.greater(spectral,center)
else:
return None
return tau
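# Illustrative usage (sketch): a band pass filter centred at 4.3 um with a
# 0.8 um wide passband and a sharp cut-off.
#
#   wl = np.linspace(3.0, 5.5, 200)
#   tau = sfilter(wl, center=4.3, width=0.8, exponent=12,
#                 taupass=0.9, taustop=0.01)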
##############################################################################
##
def responsivity(wavelength,lwavepeak, cuton=1, cutoff=20, scaling=1.0):
""" Calculate a photon detector wavelength spectral responsivity
Given a number of parameters, calculates a shape that is somewhat similar to a photon
detector spectral response, on wavelength scale. The function parameters controls the
cutoff wavelength and shape of the response. This function is not meant to replace the use
of properly measured spectral responses, but rather serves as a starting point if no other
information is available.
Args:
| wavelength (np.array[N,] or [N,1]): vector in [um].
| lwavepeak (float): approximate wavelength at peak response
        | cuton (float): cuton sharpness below peak, 0.5 < cuton < 5
        | cutoff (float): cutoff strength beyond peak, 5 < cutoff < 50
| scaling (float): scaling factor
Returns:
| responsivity (np.array[N,] or [N,1]): responsivity at wavelength intervals.
Raises:
| No exception is raised.
"""
responsivity=scaling *( ( wavelength / lwavepeak) **cuton - ( wavelength / lwavepeak) **cutoff)
responsivity= responsivity * (responsivity > 0)
return responsivity
################################################################
##
def effectiveValue(spectraldomain, spectralToProcess, spectralBaseline):
"""Normalise a spectral quantity to a scalar, using a weighted mapping by another spectral quantity.
Effectivevalue = integral(spectralToProcess * spectralBaseline) / integral( spectralBaseline)
The data in spectralToProcess and spectralBaseline must both be sampled at the same
domain values as specified in spectraldomain.
The integral is calculated with numpy/scipy trapz trapezoidal integration function.
Args:
        | spectraldomain (np.array[N,] or [N,1]): spectral domain in wavelength, frequency or wavenumber.
| spectralToProcess (np.array[N,] or [N,1]): spectral quantity to be normalised
| spectralBaseline (np.array[N,] or [N,1]): spectral serving as baseline for normalisation
Returns:
| (float): effective value
| Returns None if there is a problem
Raises:
| No exception is raised.
"""
num=np.trapz(spectralToProcess.reshape(-1, 1)*spectralBaseline.reshape(-1, 1),spectraldomain, axis=0)[0]
den=np.trapz(spectralBaseline.reshape(-1, 1),spectraldomain, axis=0)[0]
return num/den
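# Illustrative usage (sketch): effective filter transmittance weighted by a
# detector spectral response (both generated with helpers from this module).
#
#   wl = np.linspace(3.0, 5.0, 100)
#   tau = sfilter(wl, center=4.0, width=1.2, taupass=0.9)
#   resp = responsivity(wl, lwavepeak=4.5)
#   effectiveValue(wl, tau, resp)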
################################################################
##
def convertSpectralDomain(inspectraldomain, type=''):
"""Convert spectral domains, i.e. between wavelength [um], wavenummber [cm^-1] and frequency [Hz]
In string variable type, the 'from' domain and 'to' domains are indicated each with a single letter:
'f' for temporal frequency, 'l' for wavelength and 'n' for wavenumber
The 'from' domain is the first letter and the 'to' domain the second letter.
Note that the 'to' domain vector is a direct conversion of the 'from' domain
    to the 'to' domain (not interpolated or otherwise sampled).
Args:
| inspectraldomain (np.array[N,] or [N,1]): spectral domain in wavelength, frequency or wavenumber.
| wavelength vector in [um]
| frequency vector in [Hz]
| wavenumber vector in [cm^-1]
| type (string): specify from and to domains:
| 'lf' convert from wavelength to per frequency
| 'ln' convert from wavelength to per wavenumber
| 'fl' convert from frequency to per wavelength
| 'fn' convert from frequency to per wavenumber
| 'nl' convert from wavenumber to per wavelength
| 'nf' convert from wavenumber to per frequency
Returns:
| [N,1]: outspectraldomain
| Returns zero length array if type is illegal, i.e. not one of the expected values
Raises:
| No exception is raised.
"""
#use dictionary to switch between options, lambda fn to calculate, default zero
outspectraldomain = {
'lf': lambda inspectraldomain: constants.c / (inspectraldomain * 1.0e-6),
'ln': lambda inspectraldomain: (1.0e4/inspectraldomain),
'fl': lambda inspectraldomain: constants.c / (inspectraldomain * 1.0e-6),
'fn': lambda inspectraldomain: (inspectraldomain / 100) / constants.c ,
'nl': lambda inspectraldomain: (1.0e4/inspectraldomain),
'nf': lambda inspectraldomain: (inspectraldomain * 100) * constants.c,
}.get(type, lambda inspectraldomain: np.zeros(shape=(0, 0)) )(inspectraldomain)
return outspectraldomain
################################################################
##
def convertSpectralDensity(inspectraldomain, inspectralquantity, type=''):
"""Convert spectral density quantities, i.e. between W/(m^2.um), W/(m^2.cm^-1) and W/(m^2.Hz).
In string variable type, the 'from' domain and 'to' domains are indicated each with a
single letter:
'f' for temporal frequency, 'w' for wavelength and ''n' for wavenumber
The 'from' domain is the first letter and the 'to' domain the second letter.
The return values from this function are always positive, i.e. not mathematically correct,
but positive in the sense of radiance density.
The spectral density quantity input is given as a two vectors: the domain value vector
and the density quantity vector. The output of the function is also two vectors, i.e.
the 'to' domain value vector and the 'to' spectral density. Note that the 'to' domain
vector is a direct conversion of the 'from' domain to the 'to' domain (not interpolated
or otherwise sampled).
Args:
| inspectraldomain (np.array[N,] or [N,1]): spectral domain in wavelength,
frequency or wavenumber.
| inspectralquantity (np.array[N,] or [N,1]): spectral density in same domain
as domain vector above.
| wavelength vector in [um]
| frequency vector in [Hz]
| wavenumber vector in [cm^-1]
| type (string): specify from and to domains:
| 'lf' convert from per wavelength interval density to per frequency interval density
| 'ln' convert from per wavelength interval density to per wavenumber interval density
| 'fl' convert from per frequency interval density to per wavelength interval density
| 'fn' convert from per frequency interval density to per wavenumber interval density
| 'nl' convert from per wavenumber interval density to per wavelength interval density
| 'nf' convert from per wavenumber interval density to per frequency interval density
Returns:
| ([N,1],[N,1]): outspectraldomain and outspectralquantity
| Returns zero length arrays is type is illegal, i.e. not one of the expected values
Raises:
| No exception is raised.
"""
inspectraldomain = inspectraldomain.reshape(-1,)
inspectralquantity = inspectralquantity.reshape(inspectraldomain.shape[0], -1)
outspectralquantity = np.zeros(inspectralquantity.shape)
# the meshgrid idea does not work well here, because we can have very long
# spectral arrays and these become too large for meshgrid -> size **2
# we have to loop this one
spec = inspectraldomain
for col in range(inspectralquantity.shape[1]):
quant = inspectralquantity[:,col]
#use dictionary to switch between options, lambda fn to calculate, default zero
outspectraldomain = {
'lf': lambda spec: constants.c / (spec * 1.0e-6),
'fn': lambda spec: (spec / 100) / constants.c ,
'nl': lambda spec: (1.0e4/spec),
'ln': lambda spec: (1.0e4/spec),
'nf': lambda spec: (spec * 100) * constants.c,
'fl': lambda spec: constants.c / (spec * 1.0e-6),
}.get(type, lambda spec: np.zeros(shape=(0, 0)) )(spec)
outspectralquantity[:, col] = {
'lf': lambda quant: quant / (constants.c *1.0e-6 / ((spec * 1.0e-6)**2)),
'fn': lambda quant: quant * (100 *constants.c),
'nl': lambda quant: quant / (1.0e4 / spec**2) ,
'ln': lambda quant: quant / (1.0e4 / spec**2) ,
'nf': lambda quant: quant / (100 * constants.c),
'fl': lambda quant: quant / (constants.c *1.0e-6 / ((spec * 1.0e-6)**2)),
}.get(type, lambda quant: np.zeros(shape=(0, 0)) )(quant)
return (outspectraldomain,outspectralquantity)
##############################################################################
##
def savitzkyGolay1D(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
Source: http://wiki.scipy.org/Cookbook/SavitzkyGolay
The Savitzky Golay filter is a particular type of low-pass filter,
well adapted for data smoothing. For further information see:
http://www.wire.tu-bs.de/OLDWEB/mameyer/cmr/savgol.pdf
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples:
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References:
[1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
[2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
<NAME>, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
Args:
| y : array_like, shape (N,) the values of the time history of the signal.
| window_size : int the length of the window. Must be an odd integer number.
| order : int the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
| deriv: int the order of the derivative to compute (default = 0 means only smoothing)
Returns:
| ys : ndarray, shape (N) the smoothed signal (or it's n-th derivative).
Raises:
| Exception raised for window size errors.
"""
import numpy as np
from math import factorial
try:
        window_size = abs(int(window_size))
        order = abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = list(range(order+1))
half_window = (window_size -1) // 2
# precompute coefficients
    b = np.array([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
##############################################################################
##
def getFHWM(wl,tau,normaliseMax=False):
"""Given spectral domain and range, determine full-width half-max domain width
Returns the FWHM, and the two 50% wavelengths
"""
# get FWHM https://stackoverflow.com/questions/53445337/implementation-of-a-threshold-detection-function-in-python
if normaliseMax:
tau = tau / np.max(tau)
    mask = np.diff(1 * (tau > 0.5)) != 0
wlcr = np.vstack((wl[:-1][mask],wl[1:][mask]))
spcr = np.vstack((tau[:-1][mask],tau[1:][mask]))
lamh = np.zeros((2,))
# interpolate to get 0.5 crossing
for i in [0,1]:
lamh[i] = wlcr[0,i]+(wlcr[1,i]-wlcr[0,i])*(0.5-spcr[0,i])/(spcr[1,i]-spcr[0,i])
fwhm = lamh[1] - lamh[0]
return np.abs(fwhm),lamh[0], lamh[1]
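# Illustrative usage (sketch): FWHM of a gaussian passband; for exponent=2 the
# expected width is width * sqrt(ln 2), about 0.5 um here.
#
#   wl = np.linspace(3.0, 5.0, 500)
#   tau = sfilter(wl, center=4.0, width=0.6, exponent=2)
#   fwhm, lam1, lam2 = getFHWM(wl, tau)   # fwhm ~ 0.5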
##############################################################################
##
def convolve(inspectral, samplingresolution, inwinwidth, outwinwidth, windowtype=np.bartlett):
""" Convolve (non-circular) a spectral variable with a window function,
given the input resolution and input and output window widths.
This function is normally used on wavenumber-domain spectral data. The spectral
data is assumed sampled at samplingresolution wavenumber intervals.
The inwinwidth and outwinwidth window function widths are full width half-max (FWHM)
for the window functions for the inspectral and returned spectral variables, respectively.
The Bartlett function is used as default, but the user can use a different function.
The Bartlett function is a triangular function reaching zero at the ends. Window function
width is correct for Bartlett and only approximate for other window functions.
Spectral convolution is best done in frequency domain ([cm-1] units) because
the filter or emission line shapes have better symmetry in frequency domain than
in wavelength domain.
The input spectral vector must be in spectral density units of cm-1.
Args:
| inspectral (np.array[N,] or [N,1]): spectral variable input vector (e.g., radiance or transmittance).
| samplingresolution (float): wavenumber interval between inspectral samples
| inwinwidth (float): FWHM window width used to obtain the input spectral vector (e.g., spectroradiometer window width)
| outwinwidth (float): FWHM window width of the output spectral vector after convolution
| windowtype (function): name of a numpy/scipy function for the window function
Returns:
| outspectral (np.array[N,]): input vector, filtered to new window width.
| windowfn (np.array[N,]): The window function used.
Raises:
| No exception is raised.
"""
    winbins = int(round(2*(outwinwidth/(inwinwidth*samplingresolution))))
    winbins = winbins if winbins%2==1 else winbins+1
windowfn=windowtype(winbins)
#np.convolve is unfriendly towards unicode strings
if sys.version_info[0] > 2:
cmode='same'
else:
cmode='same'.encode('utf-8')
outspectral = np.convolve(windowfn/(samplingresolution*windowfn.sum()),
inspectral.reshape(-1, ),mode=cmode)
return outspectral, windowfn
######################################################################################
def circ(x, y, d=1):
""" Generation of a circular aperture.
Args:
| x (np.array[N,M]): x-grid, metres
| y (np.array[N,M]): y-grid, metres
| d (float): diameter in metres.
| comment (string): the symbol used to comment out lines, default value is None.
| delimiter (string): delimiter used to separate columns, default is whitespace.
Returns:
| z (np.array[N,M]): z-grid, 1's inside radius, meters/pixels.
Raises:
| No exception is raised.
Author: Prof. <NAME>, revised/ported by <NAME>
Original source: http://arxiv.org/pdf/1412.4031.pdf
"""
z = None
r = np.sqrt(x ** 2 + y ** 2)
z = np.zeros(r.shape)
z[r < d / 2.] = 1.0
z[r == d / 2.] = 0.5
return z
######################################################################################
def rect(x, y, sx=1, sy=1):
""" Generation of a rectangular aperture.
Args:
| x (np.array[N,M]): x-grid, metres
        | y (np.array[N,M]): y-grid, metres
| sx (float): full size along x.
| sy (float): full size along y.
Returns:
        | z (np.array[N,M]): z-grid, 1's inside the rectangle, 0.5 on the edge.
Raises:
| No exception is raised.
Author: <NAME>
Original source: http://arxiv.org/pdf/1412.4031.pdf
"""
z = None
if x is not None and y is not None:
z = np.zeros(x.shape)
z[np.logical_and(np.abs(x) < sx/2.,np.abs(y) < sy/2.)] = 1.
z[np.logical_and(np.abs(x) == sx/2., np.abs(y) == sy/2.)] = 0.5
return z
######################################################################################################
def poissonarray(inp, seedval=None, tpoint=1000):
r"""This routine calculates a Poisson random variable for an array of input values
with potentially very high event counts.
At high mean values the Poisson distribution calculation overflows. For
mean values exceeding 1000, the Poisson distribution may be approximated by a
Gaussian distribution.
The function accepts a two-dimensional array and calculate a separate random
value for each element in the array, using the element value as the mean value.
A typical use case is when calculating shot noise for image data.
From http://en.wikipedia.org/wiki/Poisson_distribution#Related_distributions
For sufficiently large values of :math:`\lambda`, (say :math:`\lambda>1000`),
the normal distribution with mean :math:`\lambda` and
variance :math:`\lambda` (standard deviation :math:`\sqrt{\lambda}`)
is an excellent approximation to the Poisson distribution.
If :math:`\lambda` is greater than about 10, then the normal distribution
is a good approximation if an appropriate continuity correction is performed, i.e.,
:math:`P(X \le x)`, where (lower-case) x is a non-negative integer, is replaced by
:math:`P(X\le\,x+0.5)`.
:math:`F_\mathrm{Poisson}(x;\lambda)\approx\,F_\mathrm{normal}(x;\mu=\lambda,\sigma^2=\lambda)`
This function returns values of zero when the input is zero.
Args:
| inp (np.array[N,M]): array with mean value
| seedval (int): seed for random number generator, None means use system time.
| tpoint (int): Threshold when to switch over between Poisson and Normal distributions
Returns:
| outp (np.array[N,M]): Poisson random variable for given mean value
Raises:
| No exception is raised.
Author: <NAME>
"""
#If seed is omitted or None, current system time is used
np.random.seed(seedval)
#this is a bit of a mess:
# - for values smaller than tpoint calculate using standard Poisson distribution
# - for values larger than tpoint but nonzero use normal approximation, add small sdelta to avoid variance==0
# - for values larger than tpoint but zero keep at zero, sdelta added has no effect, just avoids zero divide
sdelta = 1e-10
outp = np.zeros(inp.shape)
outp = (inp<=tpoint) * np.random.poisson(inp * (inp<=tpoint) )\
+ ((inp>tpoint) & (inp!=0)) * np.random.normal(loc=inp, scale=np.sqrt(inp+sdelta))
outp = np.where(inp==0, 0., outp)
return outp
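# Illustrative usage (sketch): shot noise on an array mixing zero, small and
# very large mean counts; means above tpoint use the Gaussian approximation.
#
#   means = np.array([[0., 5.], [2.0e6, 1.0e4]])
#   noisy = poissonarray(means, seedval=0)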
######################################################################################################
def draw_siemens_star(outfile, n, dpi):
r"""Siemens star chart generator
by <NAME>, http://cmp.felk.cvut.cz/~wagnelib/utils/star.html
Args:
| outfile (str): output image filename (monochrome only)
| n (int): number of spokes in the output image.
| dpi (int): dpi in output image, determines output image size.
Returns:
| Nothing, creates a monochrome siemens star image
Raises:
| No exception is raised.
Author: <NAME>, adapted by <NAME>
"""
    import imageio  # scipy.misc.imread/imsave were removed from scipy
# Create figure and add patterns
fig, ax = plt.subplots()
ax.add_collection(gen_siemens_star((0,0), 1., n))
plt.axis('equal')
plt.axis([-1.03, 1.03, -1.03, 1.03])
plt.axis('off')
    fig.savefig(outfile, bbox_inches='tight', dpi=dpi)
#read image back in order to crop to spokes only
    imgIn = np.abs(255 - imageio.imread(outfile)[:,:,0])
nz0 = np.nonzero(np.sum(imgIn,axis=0))
nz1 = np.nonzero(np.sum(imgIn,axis=1))
imgOut = imgIn[(nz1[0][0]-1) : (nz1[0][-1]+2), (nz0[0][0]-1) : (nz0[0][-1]+2)]
imgOut = np.abs(255 - imgOut)
misc.imsave(outfile, imgOut)
######################################################################################################
def gen_siemens_star(origin, radius, n):
    r"""Return a PatchCollection of n black wedges forming a Siemens star.

    Each wedge spans half of its angular slot (from c-step to c+step),
    giving alternating black and white spokes of equal width.
    """
    centres = np.linspace(0, 360, n+1)[:-1]
step = (((360.0)/n)/4.0)
patches = []
for c in centres:
patches.append(Wedge(origin, radius, c-step, c+step))
return PatchCollection(patches, facecolors='k', edgecolors='none')
######################################################################################################
def drawCheckerboard(rows, cols, numPixInBlock, imageMode, colour1, colour2, imageReturnType='image',datatype=np.uint8):
"""Draw checkerboard with 8-bit pixels
From http://stackoverflow.com/questions/2169478/how-to-make-a-checkerboard-in-numpy
Args:
| rows (int) : number or rows in checkerboard
| cols (int) : number of columns in checkerboard
| numPixInBlock (int) : number of pixels to be used in one block of the checkerboard
| imageMode (string) : PIL image mode [e.g. L (8-bit pixels, black and white), RGB (3x8-bit pixels, true color)]
| colour1 (int or RGB tuple) : colour 1 specified according to the imageMode
| colour2 (int or RGB tuple) : colour 2 specified according to the imageMode
| imageReturnType: 'image' for PIL image, 'nparray' for numpy array
| datatype (numpy data type) : numpy data type for the returned np.array
Returns:
| img : checkerboard numpy array or PIL image (see imageReturnType)
Raises:
| No exception is raised.
Example Usage:
rows = 5
cols = 7
pixInBlock = 4
color1 = 0
color2 = 255
img = drawCheckerboard(rows,cols,pixInBlock,'L',color1,color2,'nparray')
pilImg = Img.fromarray(img, 'L')
pilImg.save('{0}.png'.format('checkerboardL'))
color1 = (0,0,0)
color2 = (255,255,255)
pilImage = drawCheckerboard(rows,cols,pixInBlock,'RGB',color1,color2,'image')
pilImage.save('{0}.png'.format('checkerboardRGB'))
"""
width = numPixInBlock * cols
height = numPixInBlock * rows
coords = np.ogrid[0:height, 0:width]
idx = (coords[0] // numPixInBlock + coords[1] // numPixInBlock) % 2
vals = np.array([colour1, colour2], dtype=datatype)
img = vals[idx]
if (imageReturnType == 'nparray'):
return img
else:
from PIL import Image as Img
pilImage = Img.fromarray(img, imageMode)
return pilImage
######################################################################################################
def extractGraph(filename, xmin, xmax, ymin, ymax, outfile=None,doPlot=False,\
xaxisLog=False, yaxisLog=False, step=None, value=None):
"""Scan an image containing graph lines and produce (x,y,value) data.
This function processes an image, calculates the location of pixels on a
graph line, and then scales the (r,c) or (x,y) values of the pixels with
non-zero values.
Get a bitmap of the graph (scan or screen capture).
Take care to make the graph x and y axes horizontal/vertical.
The current version of the software does not work with rotated images.
Bitmap edit the graph. Clean the graph to the maximum extent possible,
by removing all the clutter, such that only the line to be scanned is visible.
Crop only the central block that contains the graph box, by deleting
the x and y axes notation and other clutter. The size of the cropped image
must cover the range in x and y values you want to cover in the scan. The
graph image/box must be cut out such that the x and y axes min and max
correspond exactly with the edges of the bitmap.
You must end up with nothing in the image except the line you want
to digitize.
The current version only handles single lines on the graph, but it does
handle vertical and horizontal lines.
The function can also write out a value associated with the (x,y) coordinates
of the graph, as the third column. Normally these would have all the same
value if the line represents an iso value.
The x,y axes can be lin/lin, lin/log, log/lin or log/log, set the flags.
Args:
| filename: name of the image file
| xmin: the value corresponding to the left side (column=0)
| xmax: the value corresponding to the right side (column=max)
| ymin: the value corresponding to the bottom side (row=bottom)
| ymax: the value corresponding to the top side (row=top)
| outfile: write the sampled points to this output file
| doPlot: plot the digitised graph for visual validation
| xaxisLog: x-axis is in log10 scale (min max are log values)
| yaxisLog: y-axis is in log10 scale (min max are log values)
| step: if not None, only output every step'th value
| value: if not None, write this value as the value column
Returns:
| outA: a numpy array with columns (xval, yval, value)
| side effect: a file may be written
| side effect: a graph may be displayed
Raises:
| No exception is raised.
Author: <EMAIL>
"""
from scipy import ndimage
from skimage.morphology import medial_axis
if doPlot:
import pylab
import matplotlib.pyplot as pyplot
#read image file, as grey scale
img = ndimage.imread(filename, True)
# find threshold 50% up the way
halflevel = img.min() + (img.max()-img.min()) /2
# form binary image by thresholding
img = img < halflevel
#find the skeleton one pixel wide
imgskel = medial_axis(img)
#if doPlot:
# pylab.imshow(imgskel)
# pylab.gray()
# pylab.show()
# set up indices arrays to get x and y indices
ind = np.indices(img.shape)
#skeletonise the graph to one pixel only
#then get the y pixel value, using indices
yval = ind[0,...] * imgskel.astype(float)
#if doPlot:
# pylab.imshow(yval>0)
# pylab.gray()
# pylab.show()
# invert y-axis origin from left top to left bottom
yval = yval.shape[0] - np.max(yval, axis=0)
#get indices for only the pixels where we have data
wantedIdx = np.where(np.sum(imgskel, axis = 0) > 0)
# convert to original graph coordinates
cvec = np.arange(0.0,img.shape[1])
xval = xmin + (cvec[wantedIdx] / img.shape[1]) * (xmax - xmin)
xval = xval.reshape(-1,1)
yval = ymin + (yval[wantedIdx] / img.shape[0]) * (ymax - ymin)
yval = yval.reshape(-1,1)
if xaxisLog:
xval = 10** xval
if yaxisLog:
yval = 10 ** yval
#build the result array
outA = np.hstack((xval,yval))
if value is not None:
outA = np.hstack((outA,value*np.ones(yval.shape)))
# process step intervals
if step is not None:
# collect the first value, every step'th value, and last value
outA = np.vstack((outA[0,:],outA[1:-2:step,:],outA[-1,:]))
#write output file
    if outfile is not None:
np.savetxt(outfile,outA)
if doPlot:
fig = pyplot.figure()
ax=fig.add_subplot(1,1,1)
ax.plot(xval,yval)
if xaxisLog:
ax.set_xscale('log')
if yaxisLog:
ax.set_yscale('log')
pylab.show()
return outA
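# Minimal usage sketch (not part of the original module). The filename, axis
# limits and options below are hypothetical; the image is assumed to be a
# cleaned, cropped graph bitmap as described in the docstring above.
# outA = extractGraph('graph.png', xmin=0., xmax=10., ymin=-3., ymax=0.,
#                     outfile='graph.txt', doPlot=True, yaxisLog=True, step=5)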
######################################################################################################
def makemotionsequence(imgfilename, mtnfilename,postfix,intTime,frmTim,outrows,outcols,
imgRatio,pixsize,numsamples,fnPlotInput=None):
r"""Builds a video from a still image and a displacement motion file.
The objective with this function is to create a video sequence from a still
image, as if the camera moved minutely during the sensor integration time.
A static image is moved according to the (x,y) displacement motion in an
input file. The input image must be somewhat more than ten times larger
than the required output image. The input image is sampled with the
appropriate displacement for each point in the displacement file, and pixel values
are accumulated in the output image. All of this temporal displacement and
accumulation takes place in the context of a frame integration time and frame
frequency.
The key requirements for accuracy in this method is an input image with much
higher resolution than the output image, plus a temporal displacement file with
much higher temporal sampling than the sensor integration time.
The function creates a sequence of images that can be used to create a video.
Images are numbered in sequence, using the same base name as the input image.
The sequence is generated in the current working directory.
The function currently processes only monochrome images (M,N) arrays.
The motion data file must be a compressed numpy npz or text file,
with three columns:
First column must be time, then movement along rows, then movement along columns.
The units and scale of the motion columns must be the same units and scale as
the pixel size in the output image.
imgRatio x imgRatio number of pixels in the input (hires) image are summed
together and stored in one output image pixel. In other words if imgRatio is ten,
each pixel in the output image will be the sum of 100 pixels in the input image.
During one integration time period the hires input image will be sampled at slightly
different offsets (according to the motion file) and accumulated in an intermediate
internal hires file. This intermediate internal file is collapsed as described
above.
The function creates a sequentially numbered sequence of images that can be used to
construct a video. One easy means to create the video is to use VirtualDub,
available at www.virtualdub.org/index. In VirtualDub open the first image file
in the numbered sequence, VirtualDub will then recognise the complete sequence
as a video. Once loaded in VirtualDub, save the video as avi.
Args:
| imgfilename (str): static image filename (monochrome only)
| mtnfilename (str): motion data filename.
| postfix (str): add this string to the end of the output filename.
| intTime (float): sensor integration time.
| frmTim (float): sensor frame time.
| outrows (int): number of rows in the output image.
| outcols (int): number of columns in the output image.
| imgRatio (float): hires image pixel count block size of one output image pixel
| pixsize (float): pixel size in same units as motion file.
| numsamples (int): number of motion input samples to be processed (-1 for all).
| fnPlotInput (str): output plot filename (None for no plot).
Returns:
| True if successful, message otherwise, creates numbered images in current working directory
Raises:
| No exception is raised.
Author: <NAME>
"""
from scipy import ndimage
from scipy import misc
import os
#read in the image and motion files.
if not os.path.exists(imgfilename):
return '{} not found'.format(imgfilename)
imgIn = misc.imread(imgfilename)
    centrow = imgIn.shape[0] // 2  # integer division: used as array indices below
    centcol = imgIn.shape[1] // 2
motionScale = pixsize / imgRatio
if not os.path.exists(mtnfilename):
return '{} not found'.format(mtnfilename)
if '.npz' in mtnfilename:
rcmotion = np.load(mtnfilename)['arr_0']
elif '.txt' in mtnfilename:
rcmotion = np.loadtxt(mtnfilename)
else:
return '{} not in appropriate format'.format(mtnfilename)
mtnfilenamecore = os.path.split(mtnfilename)[1]
mtnfilenamecore = mtnfilenamecore[:mtnfilenamecore.find('.')]
#reset time to start at zero
times = rcmotion[:,0] - rcmotion[0,0]
drows = rcmotion[:,1]
dcols = rcmotion[:,2]
if fnPlotInput is not None:
I = ryplot.Plotter(1,3,1,'', figsize=(6,9))
I.showImage(1, imgIn)
I.plot(2,times,rcmotion[:,1:3],'Input motion','Time s','Displacement',label=['row','col'])
I.plot(3,times,rcmotion[:,1:3]/pixsize,'Input motion','Time s','Displacement pixels',label=['row','col'])
I.saveFig(fnPlotInput)
fullframe = 0
subframes = 0
    outimage = np.zeros((int(outrows * imgRatio), int(outcols * imgRatio)))
if times.shape[0] < numsamples:
numsamples = times.shape[0]
for isample,time in enumerate(times):
if isample <= numsamples:
fracframe = np.floor(time / frmTim)
if fracframe >= fullframe + 1:
#output and reset the present image
outfilename = os.path.split(imgfilename)[1].replace('.png',
'-{}-{}-{:05d}.png'.format(mtnfilenamecore,postfix,fullframe))
outimage = outimage/subframes
saveimage = np.array([[np.sum(vchunk) for vchunk in np.split(hchunk, outrows, 1)]
for hchunk in np.split(outimage, outcols)])/imgRatio**2
misc.imsave(outfilename, saveimage)
                outimage = np.zeros((int(outrows * imgRatio), int(outcols * imgRatio)))
fullframe += 1
subframes = 0
if time - fullframe * frmTim < intTime:
#integrate the frames during integration time
# print('{} {} integrate image {}'.format(time,fracframe, fullframe))
                # cast offsets and half-sizes to int so they are valid slice indices
                roffs = int(drows[isample] / motionScale)
                coffs = int(dcols[isample] / motionScale)
                halfr = outimage.shape[0] // 2
                halfc = outimage.shape[1] // 2
                outimage += imgIn[
                    centrow + roffs - halfr:centrow + roffs + halfr,
                    centcol + coffs - halfc:centcol + coffs + halfc
                ]
subframes += 1
else:
# this sample is not integrated in the output image
# print('{} {}'.format(time,fracframe))
pass
return True
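# Minimal usage sketch (not part of the original module); all filenames and
# parameter values below are hypothetical. With imgRatio=10, every output
# pixel sums a 10x10 block of the hires input image.
# ok = makemotionsequence('scene.png', 'motion.npz', postfix='run1',
#                         intTime=0.01, frmTim=0.02, outrows=64, outcols=64,
#                         imgRatio=10, pixsize=10e-6, numsamples=-1)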
######################################################################################################
def luminousEfficiency(vlamtype='photopic', wavelen=None, eqnapprox=False):
r"""Returns the photopic luminous efficiency function on wavelength intervals
Type must be one of:
photopic: CIE Photopic V(lambda) modified by Judd (1951) and Vos (1978) [also known as CIE VM(lambda)]
scotopic: CIE (1951) Scotopic V'(lambda)
CIE2008v2: 2 degree CIE "physiologically-relevant" luminous efficiency Stockman & Sharpe
CIE2008v10: 10 degree CIE "physiologically-relevant" luminous efficiency Stockman & Sharpe
For the equation approximations (only photopic and scotopic are available), if
wavelength is not given, a vector spanning 0.3-0.8 um is created.
For the table data, if wavelength is not given a vector is read from the table.
CIE Photopic V(l) modified by Judd (1951) and Vos (1978) [also known as CIE VM(l)]
from http://www.cvrl.org/index.htm
Args:
| vlamtype (str): type of curve required
| wavelen (np.array[]): wavelength in um
| eqnapprox (bool): if False read tables, if True use equation
Returns:
| luminousEfficiency (np.array[]): luminous efficiency
| wavelen (np.array[]): wavelength in um
Raises:
| No exception is raised.
Author: <NAME>
"""
if eqnapprox:
if wavelen is None:
wavelen = np.linspace(0.3, 0.8, 100)
if 'photopic' in vlamtype:
vlam = 1.019 * np.exp(-285.51 * (wavelen - 0.5591) ** 2 ).reshape(-1,)
elif 'scotopic' in vlamtype:
vlam = 0.99234 * np.exp(-321.1 * (wavelen - 0.502) ** 2 ).reshape(-1,)
else:
return None, None
else:
if 'photopic' in vlamtype:
vlamname = 'vljve.csv'
elif 'scotopic' in vlamtype:
vlamname = 'scvle.csv'
elif 'CIE2008v2' in vlamtype:
vlamname = 'linCIE2008v2e_1.csv'
elif 'CIE2008v10' in vlamtype:
vlamname = 'linCIE2008v10e_1.csv'
else:
return None, None
#load data file from the pyradi directories, not local dir
resource_package = 'pyradi' #__name__ ## Could be any module/package name.
resource_path = os.path.join('data', 'photometry',vlamname)
dat = pkg_resources.resource_string(resource_package, resource_path)
if sys.version_info[0] > 2:
dat = np.loadtxt(StringIO(dat.decode('utf-8')),delimiter=",")
else:
dat = np.genfromtxt(StringIO(dat),delimiter=",")
if wavelen is not None:
vlam = np.interp(wavelen*1000., dat[:,0],dat[:,1],left=dat[0,1],right=dat[-1,1])
else:
wavelen = dat[:,0]/1000.
vlam = dat[:,1]
return vlam, wavelen
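# Minimal usage sketch (not part of the original module): evaluate the
# photopic equation approximation on its default 0.3-0.8 um grid; the peak
# of the returned curve should sit near 0.559 um.
def _demo_luminous_efficiency():
    vlam, wavelen = luminousEfficiency(vlamtype='photopic', eqnapprox=True)
    return wavelen[np.argmax(vlam)]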
##############################################################################################
##############################################################################################
##############################################################################################
# to calculate the MTF degradation from the pupil function
def calcMTFwavefrontError(sample, wfdisplmnt, xg, yg, specdef,
samplingStride = 1,clear='Clear'):
"""Given a mirror figure error, calculate MTF degradation from ideal
An aperture has an MTF determined by its shape. A clear aperture has
zero phase delay and the MTF is determined only by the aperture shape.
Any phase delay/error in the wavefront in the aperture will result in a
lower MTF than the clear aperture diffraction MTF.
This function calculates the MTF degradation attributable to a wavefront
error, relative to the ideal aperture MTF.
The optical transfer function is the Fourier transform of the point spread
function, and the point spread function is the square absolute of the inverse
Fourier transformed pupil function. The optical transfer function can also
be calculated directly from the pupil function. From the convolution theorem
it can be seen that the optical transfer function is the autocorrelation of
the pupil function <https://en.wikipedia.org/wiki/Optical_transfer_function>.
The pupil function comprises a masking shape (the binary shape of the pupil)
and a transmittance and spatial phase delay inside the mask. A perfect aperture
has unity transmittance and zero phase delay in the mask. Some pupils have
irregular pupil functions/shapes and hence the diffraction MTF has to be
calculated numerically using images (masks) of the pupil function.
From the OSA Handbook of Optics, Vol II, p 32.4:
For an incoherent optical system, the OTF is proportional to the two-dimensional
autocorrelation of the exit pupil. This calculation can account for any phase
factors across the pupil, such as those arising from aberrations or defocus.
A change of variables is required for the identification of an autocorrelation
(a function of position in the pupil) as a transfer function (a function of
image-plane spatial frequency). The change of variables is
$\xi = x / (\lambda d_i)$
where $x$ is the autocorrelation shift distance in the pupil, $\lambda$ is
the wavelength, and $d_i$ is the distance from the exit pupil to the image.
A system with an exit pupil of full width $D$ has an image-space cutoff
frequency (at infinite conjugates) of
$\xi_{cutoff} = D / (\lambda f)$
In this analysis we assume that
1. the sensor is operating at infinite conjugates.
2. the mask falls in the entrance pupil shape.
The MTF is calculated as follows:
1. Read in the pupil function mask and create an image of the mask.
2. Calculate the two-dimensional autocorrelation function of the binary
image (using the SciPy two-dimensional correlation function `signal.correlate2d`).
3. Scale the magnitude and $(x,y)$ dimensions according to the dimensions of
the physical pupil.
The array containing the wavefront displacement in the pupil must have np.nan
values outside the pupil. The np.nan values are ignored and not included in the
calculation. Obscurations can be modelled by placing np.nan in the obscuration.
The specdef dictionary has a string key to identify (name) the band, with a
single float contents which is the wavelength associated with this band.
Args:
| sample (string): an identifier string to be used in the plots
| wfdisplmnt (nd.array[M,N]): wavefront displacement in m
| xg (nd.array[M,N]): x values from meshgrid, for wfdisplmnt
| yg (nd.array[M,N]): y values from meshgrid, for wfdisplmnt
| specdef (dict): dictionary defining spectral wavelengths
| samplingStride (number): sampling stride to limit size and processing
| clear (string): defines the dict key for clear aperture reference
Returns:
| dictionaries below have entries for all keys in specdef.
| wfdev (dict): subsampled wavefront error in m
| phase (dict): subsampled wavefront error in rad
| pupfn (dict): subsampled complex pupil function
| MTF2D (dict): 2D MTF in (x,y) format
| MTFpol (dict): 2D MTF in (r,theta) format
| specdef (): specdef dictionary as passed plus clear entry
| MTFmean (dict): mean MTF across all rotation angles
| rho (nd.array[M,]): spatial frequency scale in cy/mrad
| fcrit (float): cutoff or critical spatial frequency cy/mrad
| clear (string): key used to signify the clear aperture case.
Raises:
| No exception is raised.
"""
from scipy import signal
import pyradi.ryplot as ryplot
error = {}
wfdev = {}
phase = {}
pupfn = {}
pupfnz = {}
MTF2D = {}
MTFpol = {}
MTFmean = {}
freqfsm = {}
rho = {}
fcrit = {}
pim = ryplot.ProcessImage()
# make the clear case zero error
wfdev[clear] = np.where(np.isnan(wfdisplmnt),np.nan,0)
specdef[clear] = 1e300
# three cases, clear is done for near infinite wavelength (=zero phase)
for specband in specdef:
# the physical deviation/error from the ideal mirror figure
# force nan outside of valid mirror surface
if clear not in specband:
wfdev[specband] = np.where(np.isnan(wfdisplmnt),np.nan,wfdisplmnt)
# resample with stride to reduce processing load
wfdev[specband] = wfdev[specband][::samplingStride,0:wfdev[specband].shape[0]:samplingStride]
# one wavelength error is 2pi rad phase shift
# use physical displacement and wavelength to normalise to # of wavelengths
phase[specband] = np.where(np.isnan(wfdev[specband]), np.nan, 2*np.pi*(wfdev[specband]/specdef[specband]))
# phase into complex pupil function
pupfn[specband] = np.exp(-1j * phase[specband])
# correlation fn does not work if nan in data set, force nan to zero
pupfnz[specband] = np.where(np.isnan(pupfn[specband]),0,pupfn[specband])
# correlation to get optical transfer function
corr = signal.correlate2d(pupfnz[specband], np.conj(pupfnz[specband]), boundary='fill', mode='full')
# normalise and get abs value to get MTF
MTF2D[specband] = np.abs(corr / np.max(corr))
polar_c, _, _ = pim.reprojectImageIntoPolar(
MTF2D[specband].reshape(MTF2D[specband].shape[0],MTF2D[specband].shape[1],1),
None, False,cval=0.)
MTFpol[specband] = polar_c[:,:,0]
MTFmean[specband] = MTFpol[specband].mean(axis=1)
#calculate the aperture diameter, geometric mean along x and y
        pdia = np.sqrt(np.abs(np.nanmax(xg) - np.nanmin(xg)) * np.abs(np.nanmax(yg) - np.nanmin(yg)))
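# The original function body is truncated at this point in the source. The
# standalone sketch below (not part of the original module) demonstrates the
# MTF-by-autocorrelation idea documented above on a clear circular pupil:
# the OTF of an incoherent system is the autocorrelation of the complex
# pupil function, and the MTF is its normalised magnitude.
def _demo_pupil_autocorrelation_mtf(npix=64):
    from scipy import signal
    y, x = np.mgrid[-1:1:npix * 1j, -1:1:npix * 1j]
    pupil = (x ** 2 + y ** 2 <= 1.).astype(complex)  # unity transmittance, zero phase
    corr = signal.correlate2d(pupil, np.conj(pupil), boundary='fill', mode='full')
    return np.abs(corr / np.max(corr))  # 1 at zero shift, falls to 0 at the cutoff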
# %%
import sys
sys.path.append("../../..")
from scipy.linalg import null_space
import copy
import numpy as np
from numpy.linalg import matrix_rank, matrix_power, cholesky, inv
import torch
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import util.geometry_util as geo_util
import solvers.rigidity_solver.gradient as gradient
from solvers.rigidity_solver.internal_structure import tetrahedron, triangulation_with_torch
from solvers.rigidity_solver.constraints_3d import select_non_colinear_points
from solvers.rigidity_solver import gradient, algo_core as core, extra_constraint
from solvers.rigidity_solver.eigen_analysis import eigen_analysis
from visualization.model_visualizer import visualize_3D, visualize_2D
from matplotlib import pyplot as plt
data = np.array([
[503, 353],
[1067, 27],
[866, 128],
[1067, 167],
[1067, 367],
[261, 432],
]) * 0.01
# mutable
parameter_nodes = {
"up-right-conn": torch.tensor(data[1], dtype=torch.double),
"right-down-node": torch.tensor(data[4], dtype=torch.double),
}
parameter_scalars = {
"sliding-ratio": torch.tensor(0.75, dtype=torch.double),
"main-right-ratio": torch.tensor(0.583, dtype=torch.double),
}
immutable = {
"base-main-conn": torch.tensor(data[5], dtype=torch.double),
"main-up-conn": torch.tensor(data[0], dtype=torch.double),
}
for param in parameter_nodes.values():
param.requires_grad_(True)
part_node_connectivity = {
"main": ("base-main-conn", "main-right-conn"),
"up-left": ("main-up-conn", "up-sliding-conn"),
"up-right": ("up-sliding-conn", "up-right-conn"),
"right": ("up-right-conn", "right-down-node"),
}
def describe_nodes():
nm = {**parameter_nodes, **immutable}
computed_nodes = {
"up-sliding-conn": torch.lerp(nm["main-up-conn"], nm["up-right-conn"], parameter_scalars["sliding-ratio"]),
"main-right-conn": torch.lerp(nm["up-right-conn"], nm["right-down-node"], parameter_scalars["main-right-ratio"]),
}
node_map = {**nm, **computed_nodes}
return node_map
part_map = {}
from collections import namedtuple
Part = namedtuple("Part", "points, edges, index_offset")
Joint = namedtuple("Joint", "pivot, part1_ind, part2_ind, translation, rotation_center")
def empty(_):
return None
joints = [
Joint(lambda nm: nm["main-up-conn"], "main", "up-left", empty, lambda nm: nm["main-up-conn"]),
Joint(lambda nm: nm["up-sliding-conn"], "up-left", "up-right", lambda nm: nm["up-right-conn"] - nm["main-up-conn"],
empty),
Joint(lambda nm: nm["up-right-conn"], "up-right", "right", empty, lambda nm: nm["up-right-conn"]),
Joint(lambda nm: nm["main-right-conn"], "main", "right", empty, lambda nm: nm["main-right-conn"]),
]
def describe_model(part_nodes, only_points=False):
offset = 0
for key, (i, j) in part_node_connectivity.items():
_points, _edges = triangulation_with_torch(part_nodes[i], part_nodes[j], 10, thickness=0.3)
part_map[key] = Part(_points, _edges, offset)
assert not torch.any(torch.isnan(_points)), f"exists nan, {part_nodes[i], part_nodes[j]}"
offset += len(_points)
point_matrix = torch.vstack([part_map[key].points for key in part_node_connectivity.keys()])
assert not torch.any(torch.isnan(point_matrix))
if only_points:
return point_matrix
edge_matrix = torch.vstack([
part_map[key].edges + part_map[key].index_offset for key in part_node_connectivity.keys()])
constraint_point_indices = torch.tensor(np.vstack([
np.concatenate(
[select_non_colinear_points(
part_map[j.part1_ind].points.detach().numpy(),
2,
near=j.pivot(part_nodes).detach().numpy()
)[1] + part_map[j.part1_ind].index_offset,
select_non_colinear_points(
part_map[j.part2_ind].points.detach().numpy(),
2,
near=j.pivot(part_nodes).detach().numpy()
)[1] + part_map[j.part2_ind].index_offset]
) for j in joints
]), dtype=torch.long)
return point_matrix, edge_matrix, constraint_point_indices
def total_length(nodes, connectivity):
    # named 'total' to avoid shadowing the builtin len()
    total = torch.tensor(0, dtype=torch.double)
    for i, j in connectivity.values():
        total += torch.norm(nodes[i] - nodes[j])
    return total
# %%
# initialization for edges and constraint_point_indices
with torch.no_grad():
nodes = describe_nodes()
points, edges, constraint_point_indices = describe_model(nodes)
init_len = total_length(nodes, part_node_connectivity)
# visualize_2D(points, edges)
# %%
n_iters = 500
optimizer = Adam([
{"params": [*parameter_nodes.values()], "lr": 0.01},
{"params": [*parameter_scalars.values()], "lr": 0.002},
])
traces = []
for it in tqdm(range(n_iters)):
optimizer.zero_grad()
nodes = describe_nodes()
points = describe_model(nodes, only_points=True)
assert not torch.any(torch.isnan(torch.vstack(tuple(nodes.values())))), f"exists nan in nodes, {nodes}"
with torch.no_grad():
joint_constraints = gradient.constraint_matrix(
points,
pivots=[j.pivot(nodes) for j in joints],
translation_vectors=[j.translation(nodes) for j in joints],
rotation_centers=[j.rotation_center(nodes) for j in joints],
joint_point_indices=constraint_point_indices,
)
extra_constraints = torch.vstack([
gradient.rigid_motion(points)
])
constraints = torch.vstack([
joint_constraints,
extra_constraints
])
B = gradient.torch_null_space(constraints)
K = gradient.spring_energy_matrix(points, edges, dim=2)
Q = torch.chain_matmul(B.t(), K, B)
# the eigenvalues are already in ascending order!
eigenvalues, eigenvectors = torch.symeig(Q, eigenvectors=True)
eigind = 1
smallest_eigenvalue = eigenvalues[eigind]
corresponding_eigenvector = torch.mv(B, eigenvectors[:, eigind])
assert not torch.allclose(eigenvalues[eigind],
torch.tensor(0.0, dtype=torch.double),
atol=1e-12), f"more than expected num dof: {eigenvalues}"
length_penalty = 0.001 * torch.pow(total_length(nodes, part_node_connectivity) - init_len, 2)
# Negate eigenvalue in the objective as we're trying to increase it
objective = -smallest_eigenvalue + length_penalty
objective.backward()
optimizer.step()
with torch.no_grad():
for value in parameter_scalars.values():
value.clamp_(0.0, 1.0)
trace = {
"eigenvalue": smallest_eigenvalue.detach().cpu().numpy(),
"eigenvector": corresponding_eigenvector.detach().cpu().numpy(),
"nodes": copy.deepcopy({k: v.detach().numpy() for k, v in nodes.items()}),
"points": points.detach().cpu().numpy(),
}
traces.append(trace)
# %%
# visualize the optimization process
from matplotlib import pyplot as plt
# objective against time
objectives = [t["eigenvalue"] for t in traces]
plt.plot(np.arange(n_iters), objectives)
# plt.show()
# shape of the triangle against time
def plot_shape(ax, vertices, edges):
for a, b in edges:
p, q = vertices[a], vertices[b]
ax.plot([p[0], q[0]], [p[1], q[1]], color=[0, 1, 0])
plt.clf()
fig, ax = plt.subplots()
ax.set(xlim=(0, 1.5), ylim=(0, 2))
ax.axis('equal')
ax.axis('off')
for key in nodes:
plt.cla()
ax.axis('equal')
ax.axis('off')
points = np.array([t["nodes"][key] for t in traces])
points_x = points[:, 0]
points_y = points[:, 1]
ax.plot(points_x, points_y, color="black")
plt.savefig(f"excavator-{key}-points.svg", transparent=True)
print(traces[0]["nodes"])
print(traces[-1]["nodes"])
for key, (i, j) in part_node_connectivity.items():
print(key, traces[-1]["nodes"][i], traces[-1]["nodes"][j], np.linalg.norm(traces[-1]["nodes"][i] - traces[-1]["nodes"][j]))
for key, (i, j) in part_node_connectivity.items():
print(key, traces[0]["nodes"][i], traces[0]["nodes"][j], np.linalg.norm(traces[0]["nodes"][i] - traces[0]["nodes"][j]))
# Reconstructed from a truncated line; .astype(int) is assumed so the sampled
# iteration numbers can index into the trace list below.
for it in np.round(np.linspace(0, n_iters - 1, 8)).astype(int):
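    # Hypothetical loop body -- the original was truncated during extraction.
    # Plot each sampled snapshot of the optimised structure using the
    # plot_shape helper defined above.
    plt.cla()
    ax.axis('equal')
    ax.axis('off')
    plot_shape(ax, traces[it]["points"], edges.numpy())
    plt.savefig(f"excavator-shape-{it:04d}.svg", transparent=True)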
# !pip3 install streamlit
from io import BytesIO
import base64
import datetime
import math  # used by dropna() below; missing from the original imports
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import yfinance as yf # https://pypi.org/project/yfinance/
##############################
# Technical Analysis Classes #
##############################
# https://github.com/bukosabino/ta/blob/master/ta/utils.py
class IndicatorMixin:
"""Util mixin indicator class"""
_fillna = False
def _check_fillna(self, series: pd.Series, value: int = 0) -> pd.Series:
"""Check if fillna flag is True.
Args:
series(pandas.Series): dataset 'Close' column.
            value(int): value to fill gaps; if -1, forward-fill then fill remaining gaps with -1 (matching the code below).
Returns:
pandas.Series: New feature generated.
"""
if self._fillna:
series_output = series.copy(deep=False)
series_output = series_output.replace([np.inf, -np.inf], np.nan)
if isinstance(value, int) and value == -1:
series = series_output.fillna(method="ffill").fillna(value=-1)
else:
series = series_output.fillna(method="ffill").fillna(value)
return series
@staticmethod
def _true_range(
high: pd.Series, low: pd.Series, prev_close: pd.Series
) -> pd.Series:
tr1 = high - low
tr2 = (high - prev_close).abs()
tr3 = (low - prev_close).abs()
true_range = pd.DataFrame(
data={"tr1": tr1, "tr2": tr2, "tr3": tr3}).max(axis=1)
return true_range
def dropna(df: pd.DataFrame) -> pd.DataFrame:
"""Drop rows with "Nans" values"""
df = df.copy()
number_cols = df.select_dtypes("number").columns.to_list()
df[number_cols] = df[number_cols][df[number_cols]
< math.exp(709)] # big number
df[number_cols] = df[number_cols][df[number_cols] != 0.0]
df = df.dropna()
return df
def _sma(series, periods: int, fillna: bool = False):
min_periods = 0 if fillna else periods
return series.rolling(window=periods, min_periods=min_periods).mean()
def _ema(series, periods, fillna=False):
min_periods = 0 if fillna else periods
return series.ewm(span=periods, min_periods=min_periods, adjust=False).mean()
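# Minimal usage sketch (not part of the original file): smooth a short price
# series with the helpers above; with fillna=True the EMA is defined from the
# first bar, while the SMA leaves NaNs until its window fills.
def _demo_moving_averages():
    close = pd.Series([10., 11., 12., 11., 13., 14.])
    return _sma(close, periods=3), _ema(close, periods=3, fillna=True)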
def _get_min_max(series1: pd.Series, series2: pd.Series, function: str = "min"):
"""Find min or max value between two lists for each index"""
series1 = np.array(series1)
series2 = np.array(series2)
if function == "min":
        output = np.amin([series1, series2], axis=0)
    else:
        # assumed counterpart branch -- the original file is truncated here
        output = np.amax([series1, series2], axis=0)
    return pd.Series(output)
# Copyright 2017 The TensorFlow Lattice Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow Lattice's keypoints_initialization module."""
import math
import os
# Dependency imports
import numpy as np
from tensorflow_lattice.python.lib import keypoints_initialization
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class KeypointsInitializationTestCase(test.TestCase):
def setUp(self):
super(KeypointsInitializationTestCase, self).setUp()
def testMaterializeLocally(self):
num_examples = 100
x = np.random.uniform(0.0, 1.0, size=num_examples)
# Read to the end of a number of epochs.
input_fn = numpy_io.numpy_input_fn(
x={'x': x}, batch_size=13, num_epochs=1, shuffle=False)
results = keypoints_initialization._materialize_locally(
tensors=input_fn(), num_steps=None)
self.assertEqual(len(results['x']), num_examples)
input_fn = numpy_io.numpy_input_fn(
x={'x': x}, batch_size=13, num_epochs=2, shuffle=False)
results = keypoints_initialization._materialize_locally(
tensors=input_fn(), num_steps=None)
self.assertEqual(len(results['x']), 2 * num_examples)
# Read a certain number of steps: just enough to read all data (last
# batch will only be partially fulfilled).
input_fn = numpy_io.numpy_input_fn(
x={'x': x}, batch_size=13, num_epochs=1, shuffle=False)
results = keypoints_initialization._materialize_locally(
tensors=input_fn(), num_steps=1)
self.assertEqual(len(results['x']), 13)
input_fn = numpy_io.numpy_input_fn(
x={'x': x}, batch_size=13, num_epochs=1, shuffle=False)
results = keypoints_initialization._materialize_locally(
tensors=input_fn(), num_steps=8)
self.assertEqual(len(results['x']), num_examples)
# Try to read beyond end of input, with num_steps set.
input_fn = numpy_io.numpy_input_fn(
x={'x': x}, batch_size=13, num_epochs=1, shuffle=False)
with self.assertRaises(errors.OutOfRangeError):
results = keypoints_initialization._materialize_locally(
tensors=input_fn(), num_steps=100)
# Try to read beyond safety limit.
input_fn = numpy_io.numpy_input_fn(
x={'x': x}, batch_size=13, num_epochs=None, shuffle=False)
with self.assertRaises(ValueError):
results = keypoints_initialization._materialize_locally(
tensors=input_fn(), num_steps=None, safety_size=1000)
def _BuildInputs(self, x0, x1, x2):
"""Returns input_fn, feature_names and feature_columns."""
def _input_fn():
return ({
'x0': array_ops.constant(x0, dtype=dtypes.float32),
'x1': array_ops.constant(x1, dtype=dtypes.float32),
'x2': array_ops.constant(x2, dtype=dtypes.float32),
}, None)
feature_names = ['x0', 'x1', 'x2']
feature_columns = set(
[feature_column_lib.numeric_column(key=fn) for fn in feature_names])
return _input_fn, feature_names, feature_columns
def _CheckSaveQuantilesForKeypoints(self, name, num_examples, num_steps, x0,
x1, x2, use_feature_columns, override):
input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1, x2)
save_dir = os.path.join(self.get_temp_dir(), name)
keypoints_initialization.save_quantiles_for_keypoints(
input_fn,
save_dir,
feature_columns=(feature_columns if use_feature_columns else None),
num_quantiles=5,
override=override)
# Check by reading files directly.
subdir = os.path.join(save_dir,
keypoints_initialization._QUANTILES_SUBDIRECTORY)
quantiles_x0 = keypoints_initialization._load_quantiles(subdir, 'x0')
quantiles_x1 = keypoints_initialization._load_quantiles(subdir, 'x1')
quantiles_x2 = keypoints_initialization._load_quantiles(subdir, 'x2')
self.assertAllClose(
quantiles_x0, [0, 2.5**2, 5.**2, 7.5**2, 100.], atol=0.2)
self.assertAllClose(
quantiles_x1,
[1., math.pow(10., 0.5), 10.0,
math.pow(10., 1.5), 100.],
atol=0.2)
# x2 should start with [0,0,...] and end in [..., 1, 1], the middle value
# can be either 0 or 1.
self.assertAllClose(quantiles_x2[0:2], [0., 0.], atol=1e-3)
self.assertAllClose(quantiles_x2[-2:], [1., 1.], atol=1e-3)
# New graph is needed because default graph is changed by save
# keypoints, and self.test_session() will by default try to reuse a cached
# session, with a different graph.
with ops.Graph().as_default() as g:
# Check by using load_keypoints_from_quantiles.
keypoints_init = keypoints_initialization.load_keypoints_from_quantiles(
feature_names,
save_dir,
3,
output_min={
'x0': 0.,
'x1': 1.,
'x2': 7.
},
output_max={
'x0': 1.,
'x1': 10.,
'x2': 13.
})
with self.test_session(graph=g) as sess:
keypoints_init = sess.run(keypoints_init)
self.assertAllClose(keypoints_init['x0'][0], [0, 5.**2, 100.], atol=0.2)
self.assertAllClose(keypoints_init['x0'][1], [0., 0.5, 1.])
self.assertAllClose(keypoints_init['x1'][0], [1., 10.0, 100.], atol=0.2)
self.assertAllClose(keypoints_init['x1'][1], [1., 5.5, 10.])
# Notice x2 only has 2 unique values, so it should have lowered the
# num_keypoints to 2.
self.assertAllClose([0., 1.0], keypoints_init['x2'][0], atol=1e-3)
self.assertAllClose([7., 13.0], keypoints_init['x2'][1], atol=1e-3)
# Check that load_keypoints_from_quantiles don't generate anything
# if num_keypoints is 0 or unset.
with ops.Graph().as_default() as g:
# Check by using load_keypoints_from_quantiles.
keypoints_init = keypoints_initialization.load_keypoints_from_quantiles(
feature_names,
save_dir, {
'x0': 3,
'x2': 3,
'x1': 0
},
output_min={
'x0': 0.,
'x1': 1.,
'x2': 7.
},
output_max={
'x0': 1.,
'x1': 10.,
'x2': 13.
})
with self.test_session(graph=g) as sess:
keypoints_init = sess.run(keypoints_init)
self.assertTrue('x0' in keypoints_init)
self.assertTrue('x2' in keypoints_init)
self.assertTrue('x1' not in keypoints_init)
def testSaveQuantilesForKeypoints(self):
"""Tests quantiles are being calculated correctly."""
num_examples = 100000
num_steps = num_examples / num_examples
# Verify for randomized input: try with/without feature_columns.
    x0 = np.random.uniform(0.0, 10.0, size=num_examples)
"""Feature View: show spikes as 2D points in feature space."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import operator
import time
import numpy as np
import numpy.random as rdn
from qtools import QtGui, QtCore, show_window
from galry import (Manager, PlotPaintManager, PlotInteractionManager, Visual,
GalryWidget, enforce_dtype, RectanglesVisual,
TextVisual, PlotVisual, AxesVisual, GridVisual, NavigationEventProcessor,
EventProcessor, DataNormalizer)
from kwiklib.dataio.selection import get_indices, select
from kwiklib.dataio.tools import get_array
from klustaviewa.views.common import HighlightManager, KlustaViewaBindings, KlustaView
from kwiklib.utils.colors import COLORMAP_TEXTURE, SHIFTLEN, COLORMAP
from klustaviewa import USERPREF
from kwiklib.utils import logger as log
import klustaviewa
# -----------------------------------------------------------------------------
# Shaders
# -----------------------------------------------------------------------------
VERTEX_SHADER = """
// move the vertex to its position
vec3 position = vec3(0, 0, 0);
position.xy = position0;
vhighlight = highlight;
cmap_vindex = cmap_index;
vmask = mask;
vselection = selection;
// compute the depth: put masked spikes on the background, unmasked ones
// on the foreground on a different layer for each cluster
float depth = 0.;
//if (mask == 1.)
depth = -(cluster_depth + 1) / (nclusters + 10);
position.z = depth;
if ((highlight > 0) || (selection > 0))
gl_PointSize = 5.;
else
gl_PointSize = u_point_size;
// DEBUG
//gl_PointSize = 20;
"""
FRAGMENT_SHADER = """
float index = %CMAP_OFFSET% + cmap_vindex * %CMAP_STEP%;
vec2 index2d = vec2(index, %SHIFT_OFFSET% + (1 + toggle_mask * (1 - vmask) * %SHIFTLEN%) * %SHIFT_STEP%);
if (vhighlight > 0) {{
index2d.y = 0;
out_color = texture2D(cmap, index2d);
out_color.w = .85;
}}
else {{
out_color = texture2D(cmap, index2d);
out_color.w = {0:.3f};
}}
"""
# Background spikes.
VERTEX_SHADER_BACKGROUND = """
// move the vertex to its position
vec3 position = vec3(0, 0, 0);
position.xy = position0;
position.z = 0.;
gl_PointSize = u_point_size;
"""
FRAGMENT_SHADER_BACKGROUND = """
out_color = vec4(.75, .75, .75, alpha);
"""
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def polygon_contains_points(polygon, points):
"""Returns the points within a polygon.
Arguments:
* polygon: a Nx2 array with the coordinates of the polygon vertices.
* points: a Nx2 array with the coordinates of the points.
Returns:
* arr: a Nx2 array of booleans with the belonging of every point to
the inside of the polygon.
"""
try:
from matplotlib.path import Path
p = Path(polygon)
return p.contains_points(points)
except:
import matplotlib.nxutils
return matplotlib.nxutils.points_inside_poly(points, polygon)
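# Minimal usage sketch (not part of the original module): select the points
# that fall inside a unit-square lasso polygon.
def _demo_polygon_selection():
    polygon = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    points = np.array([[.5, .5], [2., 2.], [.1, .9]])
    return polygon_contains_points(polygon, points)  # [True, False, True]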
# -----------------------------------------------------------------------------
# Grid
# -----------------------------------------------------------------------------
def nicenum(x, round=False):
e = np.floor(np.log10(x))
f = x / 10 ** e
eps = 1e-6
if round:
if f < 1.5:
nf = 1.
elif f < 3:
nf = 2.
elif f < 7.:
nf = 5.
else:
nf = 10.
else:
if f < 1 - eps:
nf = 1.
elif f < 2 - eps:
nf = 2.
elif f < 5 - eps:
nf = 5.
else:
nf = 10.
return nf * 10 ** e
def get_ticks(x0, x1):
nticks = 5
r = nicenum(x1 - x0, False)
d = nicenum(r / (nticks - 1), True)
g0 = np.floor(x0 / d) * d
g1 = np.ceil(x1 / d) * d
nfrac = int(max(-np.floor(np.log10(d)), 0))
return np.arange(g0, g1 + .5 * d, d), nfrac
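# Minimal usage sketch (not part of the original module): 'nice' tick
# positions for a viewbox spanning 0..2.7 land on a round step of 1.
def _demo_get_ticks():
    ticks, nfrac = get_ticks(0., 2.7)
    return ticks, nfrac  # ticks == [0., 1., 2., 3.], nfrac == 0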
def format_number(x, nfrac=None):
if nfrac is None:
nfrac = 2
if np.abs(x) < 1e-15:
return "0"
elif np.abs(x) > 100.001:
return "%.3e" % x
if nfrac <= 2:
return "%.2f" % x
else:
nfrac = nfrac + int(np.log10(np.abs(x)))
return ("%." + str(nfrac) + "e") % x
def get_ticks_text(x0, y0, x1, y1):
ticksx, nfracx = get_ticks(x0, x1)
ticksy, nfracy = get_ticks(y0, y1)
n = len(ticksx)
text = [format_number(x, nfracx) for x in ticksx]
text += [format_number(x, nfracy) for x in ticksy]
# position of the ticks
coordinates = np.zeros((len(text), 2))
coordinates[:n, 0] = ticksx
coordinates[n:, 1] = ticksy
return text, coordinates, n
class GridEventProcessor(EventProcessor):
def initialize(self):
self.register('Initialize', self.update_axes)
self.register('Pan', self.update_axes)
self.register('Zoom', self.update_axes)
self.register('Reset', self.update_axes)
self.register('Animate', self.update_axes)
self.register(None, self.update_axes)
def update_viewbox(self):
# normalization viewbox
self.normalizer = DataNormalizer()
self.normalizer.normalize(
(0, -1, self.parent.data_manager.duration, 1))
def update_axes(self, parameter):
nav = self.get_processor('navigation')
if not nav:
return
if not self.parent.projection_manager.grid_visible:
return
viewbox = nav.get_viewbox()
x0, y0, x1, y1 = viewbox
x0 = self.normalizer.unnormalize_x(x0)
y0 = self.normalizer.unnormalize_y(y0)
x1 = self.normalizer.unnormalize_x(x1)
y1 = self.normalizer.unnormalize_y(y1)
viewbox = (x0, y0, x1, y1)
text, coordinates, n = get_ticks_text(*viewbox)
coordinates[:,0] = self.normalizer.normalize_x(coordinates[:,0])
coordinates[:,1] = self.normalizer.normalize_y(coordinates[:,1])
# here: coordinates contains positions centered on the static
# xy=0 axes of the screen
        position = np.repeat(coordinates, 2, axis=0)
# Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
# System libraries
from collections import deque  # deque is used below; assumed missing from the truncated imports
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
# Local libraries
from cdp import LPSTemperatureV1
from network_objects import *
from settings import *
class PlotTemperature(pg.GraphicsWindow):
type = LPSTemperatureV1.type
def __init__(self, serial):
pg.GraphicsWindow.__init__(self)
self.setWindowTitle('CUWB Monitor - Temperature Plot ID: 0x{:08X}'.format(serial))
self.resize(900,500)
self.serial = serial
self.graph_window = self.addPlot(title='C')
self.graph_window.addLegend()
self.graph_window.showGrid(x=True, y=True)
self.temperature = self.graph_window.plot(pen=pg.mkPen('b', width=3), name='Temperature')
self.timer = self.startTimer(QPLOT_FREQUENCY)
self.last_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[LPSTemperatureV1.type]
self.data = deque([], TRAIL_LENGTH)
self.time = deque([], TRAIL_LENGTH)
_current_size = len(UwbNetwork.nodes[self.serial].cdp_pkts[LPSTemperatureV1.type])
for idx in range(_current_size):
self.data.append(UwbNetwork.nodes[self.serial].cdp_pkts[LPSTemperatureV1.type][idx - _current_size].temperature / 480.0 + 42.5)
self.time.append(UwbNetwork.nodes[self.serial].cdp_pkts_time[LPSTemperatureV1.type][idx - _current_size])
def timerEvent(self, e):
if not UwbNetwork.running:
self.killTimer(self.timer)
self.close()
return
_current_size = UwbNetwork.nodes[self.serial].cdp_pkts_count[LPSTemperatureV1.type] - self.last_count
self.last_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[LPSTemperatureV1.type]
if _current_size == 0: return
for idx in range(_current_size):
self.data.append(UwbNetwork.nodes[self.serial].cdp_pkts[LPSTemperatureV1.type][idx - _current_size].temperature / 480.0 + 42.5)
self.time.append(UwbNetwork.nodes[self.serial].cdp_pkts_time[LPSTemperatureV1.type][idx - _current_size])
        self.temperature.setData(np.array(self.time), np.array(self.data))  # y-data argument assumed; original line truncated
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
def shifted_log_diff(rates, shift=2.0):
    return np.log(rates + shift)
import numpy as np
import scipy.sparse as sps
from scipy.sparse.linalg import LinearOperator
from scipy.optimize._slsqp import slsqp
from scipy.optimize._differentiable_functions import FD_METHODS
from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy
from scipy.optimize._constraints import old_bound_to_new
from scipy.optimize._minimize import standardize_constraints, standardize_bounds
from scipy.optimize._minimize import MemoizeJac
from scipy.optimize import OptimizeResult
from scipy.optimize._numdiff import approx_derivative  # used by the finite-difference constraint Jacobians below
# Helpers referenced below (_clip_x_for_func, _check_clip_x, _arr_to_scalar,
# _remove_from_bounds, _remove_from_func, _add_to_array, _eval_constraint,
# _eval_con_normals) are assumed available from the corresponding
# scipy.optimize internals of the scipy version this file targets.
from scipy.optimize._differentiable_functions import ScalarFunction
def minimize(
fun, x0, args=(), jac=None,
bounds=None, constraints=(), tol=None, options=None
):
"""Minimization of scalar function of one or more variables.
Wrapper of scipy.optimize.minimize implementing shortcuts to the SLSQP
method and extracting the KKT multipliers.
Parameters
----------
fun : callable
The objective function to be minimized.
fun(x, *args) -> float
where x is an 1-D array with shape (n,) and args
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where n is the number of independent variables.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (fun, jac and hess functions).
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
Method for computing the gradient vector.
If it is a callable, it should be a function that returns the gradient
vector:
jac(x, *args) -> array_like, shape (n,)
where x is an array with shape (n,) and args is a tuple with
the fixed parameters. If jac is a Boolean and is True, fun is
assumed to return a tuple (f, g) containing the objective
function and the gradient.
If None or False, the gradient will be estimated using 2-point finite
difference estimation with an absolute step size.
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
to select a finite difference scheme for numerical estimation of the
gradient with a relative step size. These finite difference schemes
obey any specified bounds.
bounds : scipy.optimize.Bounds, optional
Bounds on variables as an instance of Bounds class.
constraints : scipy.optimize.Constraint or List of Constraints, optional
Constraints defined as a single object or a list of objects specifying
constraints to the optimization problem.
Available constraints are:
- LinearConstraint
- NonlinearConstraint
tol : float, optional
Tolerance for termination. When tol is specified, the selected
minimization algorithm sets some relevant solver-specific tolerance(s)
equal to tol. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options.
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
ftol : float
Precision goal for the value of f in the stopping criterion.
eps: float
Step size used for numerical approximation of the Jacobian.
Returns
-------
res : scipy.optimize.OptimizeResult
The optimization result represented as a OptimizeResult object.
Important attributes are: x the solution array, success a
Boolean flag indicating if the optimizer exited successfully and
message which describes the cause of the termination. See
OptimizeResult for a description of other attributes.
"""
x0 = np.atleast_1d(np.asarray(x0))
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if options is None:
options = {}
# check gradient vector
if callable(jac) or jac in FD_METHODS:
pass
elif jac is True:
# fun returns func and grad
fun = MemoizeJac(fun)
jac = fun.derivative
else:
# default if jac option is not understood
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
options.setdefault('ftol', tol)
constraints = standardize_constraints(constraints, x0, 'slsqp')
remove_vars = False
if bounds is not None:
# SLSQP can't take the finite-difference derivatives when a variable is
# fixed by the bounds. To avoid this issue, remove fixed variables from
# the problem.
# convert to new-style bounds so we only have to consider one case
bounds = standardize_bounds(bounds, x0, 'new')
# determine whether any variables are fixed
i_fixed = (bounds.lb == bounds.ub)
# determine whether finite differences are needed for any grad/jac
fd_needed = (not callable(jac))
for con in constraints:
if not callable(con.get('jac', None)):
fd_needed = True
# If finite differences are ever used, remove all fixed variables
remove_vars = i_fixed.any() and fd_needed
if remove_vars:
x_fixed = (bounds.lb)[i_fixed]
x0 = x0[~i_fixed]
bounds = _remove_from_bounds(bounds, i_fixed)
fun = _remove_from_func(fun, i_fixed, x_fixed)
if callable(jac):
jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
# make a copy of the constraints so the user's version doesn't
# get changed. (Shallow copy is ok)
constraints = [con.copy() for con in constraints]
for con in constraints: # yes, guaranteed to be a list
con['fun'] = _remove_from_func(con['fun'], i_fixed,
x_fixed, min_dim=1,
remove=0)
if callable(con.get('jac', None)):
con['jac'] = _remove_from_func(con['jac'], i_fixed,
x_fixed, min_dim=2,
remove=1)
bounds = standardize_bounds(bounds, x0, 'slsqp')
res = _minimize_slsqp(fun, x0, args, jac, bounds, constraints, **options)
if remove_vars:
res.x = _add_to_array(res.x, i_fixed, x_fixed)
res.jac = _add_to_array(res.jac, i_fixed, np.nan)
if 'hess_inv' in res:
res.hess_inv = None
return res
def _minimize_slsqp(
fun, x0, args=(), jac=None, bounds=None, constraints=(),
maxiter=100, ftol=1.0E-6, iprint=1, disp=False,
eps=np.sqrt(np.finfo(float).eps), finite_diff_rel_step=None
):
"""
Minimize a scalar function of one or more variables using Sequential
Least Squares Programming (SLSQP).
Options
-------
ftol : float
Precision goal for the value of f in the stopping criterion.
eps : float
Step size used for numerical approximation of the Jacobian.
disp : bool
Set to True to print convergence messages. If False,
`verbosity` is ignored and set to 0.
maxiter : int
Maximum number of iterations.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of `jac`. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
"""
iter = maxiter - 1
acc = ftol
if not disp:
iprint = 0
# Transform x0 into an array.
x = np.asfarray(x0).flatten()
# SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by
# ScalarFunction
if bounds is None or len(bounds) == 0:
new_bounds = (-np.inf, np.inf)
else:
new_bounds = old_bound_to_new(bounds)
# clip the initial guess to bounds, otherwise ScalarFunction doesn't work
x = np.clip(x, new_bounds[0], new_bounds[1])
# Constraints are triaged per type into a dictionary of tuples
if isinstance(constraints, dict):
constraints = (constraints, )
cons = {'eq': (), 'ineq': ()}
for ic, con in enumerate(constraints):
# check type
try:
ctype = con['type'].lower()
except KeyError as e:
raise KeyError('Constraint %d has no type defined.' % ic) from e
except TypeError as e:
raise TypeError('Constraints must be defined using a '
'dictionary.') from e
except AttributeError as e:
raise TypeError("Constraint's type must be a string.") from e
else:
if ctype not in ['eq', 'ineq']:
raise ValueError("Unknown constraint type '%s'." % con['type'])
# check function
if 'fun' not in con:
raise ValueError('Constraint %d has no function defined.' % ic)
# check Jacobian
cjac = con.get('jac')
if cjac is None:
# approximate Jacobian function. The factory function is needed
# to keep a reference to `fun`, see gh-4240.
def cjac_factory(fun):
def cjac(x, *args):
x = _check_clip_x(x, new_bounds)
if jac in ['2-point', '3-point', 'cs']:
return approx_derivative(fun, x, method=jac, args=args,
rel_step=finite_diff_rel_step,
bounds=new_bounds)
else:
return approx_derivative(fun, x, method='2-point',
abs_step=eps, args=args,
bounds=new_bounds)
return cjac
cjac = cjac_factory(con['fun'])
# update constraints' dictionary
cons[ctype] += ({'fun': con['fun'],
'jac': cjac,
'args': con.get('args', ())}, )
exit_modes = {-1: "Gradient evaluation required (g & a)",
0: "Optimization terminated successfully",
1: "Function evaluation required (f & c)",
2: "More equality constraints than independent variables",
3: "More than 3*n iterations in LSQ subproblem",
4: "Inequality constraints incompatible",
5: "Singular matrix E in LSQ subproblem",
6: "Singular matrix C in LSQ subproblem",
7: "Rank-deficient equality constraint subproblem HFTI",
8: "Positive directional derivative for linesearch",
9: "Iteration limit reached"}
# Set the parameters that SLSQP will need
# _meq_cv: a list containing the length of values each constraint function
_meq_cv = [len(np.atleast_1d(c['fun'](x, *c['args']))) for c in cons['eq']]
_mieq_cv = [len(np.atleast_1d(c['fun'](x, *c['args']))) for c in cons['ineq']]
# meq, mieq: number of equality and inequality constraints
meq = sum(_meq_cv)
mieq = sum(_mieq_cv)
# m = The total number of constraints
m = meq + mieq
# la = The number of constraints, or 1 if there are no constraints
la = np.array([1, m]).max()
# n = The number of independent variables
n = len(x)
# Define the workspaces for SLSQP
n1 = n + 1
mineq = m - meq + n1 + n1
len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \
+ 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1
len_jw = mineq
w = np.zeros(len_w)
jw = np.zeros(len_jw)
# Decompose bounds into xl and xu
if bounds is None or len(bounds) == 0:
xl = np.empty(n, dtype=float)
xu = np.empty(n, dtype=float)
xl.fill(np.nan)
xu.fill(np.nan)
else:
bnds = np.array(
[(_arr_to_scalar(l), _arr_to_scalar(u)) for (l, u) in bounds],
dtype=float
)
if bnds.shape[0] != n:
raise IndexError('SLSQP Error: the length of bounds is not '
'compatible with that of x0.')
with np.errstate(invalid='ignore'):
bnderr = bnds[:, 0] > bnds[:, 1]
if bnderr.any():
raise ValueError('SLSQP Error: lb > ub in bounds %s.' %
', '.join(str(b) for b in bnderr))
xl, xu = bnds[:, 0], bnds[:, 1]
# Mark infinite bounds with nans; the Fortran code understands this
infbnd = ~np.isfinite(bnds)
xl[infbnd[:, 0]] = np.nan
xu[infbnd[:, 1]] = np.nan
# ScalarFunction provides function and gradient evaluation
sf = _prepare_scalar_function(fun, x, jac=jac, args=args, epsilon=eps,
finite_diff_rel_step=finite_diff_rel_step,
bounds=new_bounds)
# gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this
# doesn't get sent to the func/grad evaluator.
wrapped_fun = _clip_x_for_func(sf.fun, new_bounds)
wrapped_grad = _clip_x_for_func(sf.grad, new_bounds)
# Initialize the iteration counter and the mode value
mode = np.array(0, int)
acc = np.array(acc, float)
majiter = np.array(iter, int)
majiter_prev = 0
# Initialize internal SLSQP state variables
alpha = np.array(0, float)
f0 = np.array(0, float)
gs = np.array(0, float)
h1 = np.array(0, float)
h2 = np.array(0, float)
h3 = np.array(0, float)
h4 = np.array(0, float)
t = np.array(0, float)
t0 = np.array(0, float)
tol = np.array(0, float)
iexact = np.array(0, int)
incons = np.array(0, int)
ireset = np.array(0, int)
itermx = np.array(0, int)
line = np.array(0, int)
n1 = np.array(0, int)
n2 = np.array(0, int)
n3 = np.array(0, int)
# Print the header if iprint >= 2
if iprint >= 2:
print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM"))
# mode is zero on entry, so call objective, constraints and gradients
# there should be no func evaluations here because it's cached from
# ScalarFunction
fx = wrapped_fun(x)
g = np.append(wrapped_grad(x), 0.0)
c = _eval_constraint(x, cons)
a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
while 1:
# Call SLSQP
slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw,
alpha, f0, gs, h1, h2, h3, h4, t, t0, tol,
iexact, incons, ireset, itermx, line,
n1, n2, n3)
if mode == 1: # objective and constraint evaluation required
fx = wrapped_fun(x)
c = _eval_constraint(x, cons)
if mode == -1: # gradient evaluation required
g = np.append(wrapped_grad(x), 0.0)
a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
if majiter > majiter_prev:
# Print the status of the current iterate if iprint >= 2
if iprint >= 2:
print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev,
fx, np.linalg.norm(g)))
# If exit mode is not -1 or 1, slsqp has completed
if abs(mode) != 1:
break
majiter_prev = int(majiter)
# Obtain KKT multipliers
im = 1
il = im + la
ix = il + (n1*n)//2 + 1
ir = ix + n - 1
_kkt_mult = w[ir:ir + m]
# KKT multipliers
w_ind = 0
kkt_multiplier = dict()
for _t, cv in [("eq", _meq_cv), ("ineq", _mieq_cv)]:
kkt = []
for dim in cv:
kkt += [_kkt_mult[w_ind:(w_ind + dim)]]
w_ind += dim
kkt_multiplier[_t] = kkt
# Optimization loop complete. Print status if requested
if iprint >= 1:
print(f"{exit_modes[int(mode)]} (Exit mode {mode})")
print(" Current function value:", fx)
print(" Iterations:", majiter)
print(" Function evaluations:", sf.nfev)
print(" Gradient evaluations:", sf.ngev)
return OptimizeResult(x=x, fun=fx, jac=g[:-1],
nit=int(majiter),
nfev=sf.nfev, njev=sf.ngev, status=int(mode),
message=exit_modes[int(mode)],
success=(mode==0),
kkt=kkt_multiplier)
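# --- Added usage sketch (illustrative; the driver name and the call below
# are assumptions, not part of the original source) ---
# The returned OptimizeResult exposes the grouped KKT multipliers, so a
# caller can inspect them per constraint:
#
#     res = _minimize_slsqp(fun, x0, constraints=cons)   # hypothetical call
#     lambdas_eq = res.kkt['eq']    # list of arrays, one per equality constraint
#     mus_ineq = res.kkt['ineq']    # list of arrays, one per inequality constraint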
def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None,
epsilon=None, finite_diff_rel_step=None,
hess=None):
"""
Creates a ScalarFunction object for use with scalar minimizers
(BFGS/LBFGSB/SLSQP/TNC/CG/etc).
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
jac : {callable, '2-point', '3-point', 'cs', None}, optional
Method for computing the gradient vector. If it is a callable, it
should be a function that returns the gradient vector:
``jac(x, *args) -> array_like, shape (n,)``
If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient
is calculated with a relative step for finite differences. If `None`,
then two-point finite differences with an absolute step is used.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` functions).
bounds : sequence, optional
Bounds on variables. 'new-style' bounds are required.
epsilon : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
hess : {callable, '2-point', '3-point', 'cs', None}
Computes the Hessian matrix. If it is callable, it should return the
Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
Alternatively, the keywords {'2-point', '3-point', 'cs'} select a
finite difference scheme for numerical estimation.
Whenever the gradient is estimated via finite-differences, the Hessian
cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
to be estimated using one of the quasi-Newton strategies.
Returns
-------
sf : ScalarFunction
"""
if callable(jac):
grad = jac
elif jac in FD_METHODS:
# epsilon is set to None so that ScalarFunction is made to use
# rel_step
epsilon = None
grad = jac
else:
# default (jac is None) is to do 2-point finite differences with
# absolute step size. ScalarFunction has to be provided an
# epsilon value that is not None to use absolute steps. This is
# normally the case from most _minimize* methods.
grad = '2-point'
epsilon = epsilon
if hess is None:
# ScalarFunction requires something for hess, so we give a dummy
# implementation here if nothing is provided, return a value of None
# so that downstream minimisers halt. The results of `fun.hess`
# should not be used.
def hess(x, *args):
return None
if bounds is None:
bounds = (-np.inf, np.inf)
# ScalarFunction caches. Reuse of fun(x) during grad
# calculation reduces overall function evaluations.
sf = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, bounds, epsilon=epsilon)
return sf
class ScalarFunction:
"""Scalar function and its derivatives.
This class defines a scalar function F: R^n->R and methods for
computing or approximating its first and second derivatives.
Parameters
----------
fun : callable
evaluates the scalar function. Must be of the form ``fun(x, *args)``,
where ``x`` is the argument in the form of a 1-D array and ``args`` is
a tuple of any additional fixed parameters needed to completely specify
the function. Should return a scalar.
x0 : array-like
Provides an initial set of variables for evaluating fun. Array of real
elements of size (n,), where 'n' is the number of independent
variables.
args : tuple, optional
Any additional fixed parameters needed to completely specify the scalar
function.
grad : {callable, '2-point', '3-point', 'cs'}
Method for computing the gradient vector.
If it is a callable, it should be a function that returns the gradient
vector:
``grad(x, *args) -> array_like, shape (n,)``
where ``x`` is an array with shape (n,) and ``args`` is a tuple with
the fixed parameters.
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
to select a finite difference scheme for numerical estimation of the
gradient with a relative step size. These finite difference schemes
obey any specified `bounds`.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}
Method for computing the Hessian matrix. If it is callable, it should
return the Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
where x is a (n,) ndarray and `args` is a tuple with the fixed
parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'}
select a finite difference scheme for numerical estimation. Or, objects
implementing `HessianUpdateStrategy` interface can be used to
approximate the Hessian.
Whenever the gradient is estimated via finite-differences, the Hessian
cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
to be estimated using one of the quasi-Newton strategies.
finite_diff_rel_step : None or array_like
Relative step size to use. The absolute step size is computed as
``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly
adjusted to fit into the bounds. For ``method='3-point'`` the sign
of `h` is ignored. If None then finite_diff_rel_step is selected
automatically.
finite_diff_bounds : tuple of array_like
Lower and upper bounds on independent variables. Defaults to no bounds,
(-np.inf, np.inf). Each bound must match the size of `x0` or be a
scalar, in the latter case the bound will be the same for all
variables. Use it to limit the range of function evaluation.
epsilon : None or array_like, optional
Absolute step size to use, possibly adjusted to fit into the bounds.
For ``method='3-point'`` the sign of `epsilon` is ignored. By default
relative steps are used, only if ``epsilon is not None`` are absolute
steps used.
Notes
-----
This class implements a memoization logic. There are methods `fun`,
`grad`, `hess` and corresponding attributes `f`, `g` and `H`. The following
things should be considered:
1. Use only public methods `fun`, `grad` and `hess`.
2. After one of the methods is called, the corresponding attribute
will be set. However, a subsequent call with a different argument
of *any* of the methods may overwrite the attribute.
"""
def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
finite_diff_bounds, epsilon=None):
if not callable(grad) and grad not in FD_METHODS:
raise ValueError(
f"`grad` must be either callable or one of {FD_METHODS}."
)
if not (callable(hess) or hess in FD_METHODS
or isinstance(hess, HessianUpdateStrategy)):
raise ValueError(
f"`hess` must be either callable, HessianUpdateStrategy"
f" or one of {FD_METHODS}."
)
if grad in FD_METHODS and hess in FD_METHODS:
raise ValueError("Whenever the gradient is estimated via "
"finite-differences, we require the Hessian "
"to be estimated using one of the "
"quasi-Newton strategies.")
# the astype call ensures that self.x is a copy of x0
self.x = np.atleast_1d(x0).astype(float)
self.n = self.x.size
self.nfev = 0
self.ngev = 0
self.nhev = 0
self.f_updated = False
self.g_updated = False
self.H_updated = False
self._lowest_x = None
self._lowest_f = np.inf
finite_diff_options = {}
if grad in FD_METHODS:
finite_diff_options["method"] = grad
finite_diff_options["rel_step"] = finite_diff_rel_step
finite_diff_options["abs_step"] = epsilon
finite_diff_options["bounds"] = finite_diff_bounds
if hess in FD_METHODS:
finite_diff_options["method"] = hess
finite_diff_options["rel_step"] = finite_diff_rel_step
finite_diff_options["abs_step"] = epsilon
finite_diff_options["as_linear_operator"] = True
# Function evaluation
def fun_wrapped(x):
self.nfev += 1
# Send a copy because the user may overwrite it.
# Overwriting results in undefined behaviour because
# fun(self.x) will change self.x, with the two no longer linked.
fx = fun(np.copy(x), *args)
# Make sure the function returns a true scalar
if not np.isscalar(fx):
try:
fx = np.asarray(fx).item()
except (TypeError, ValueError) as e:
raise ValueError(
"The user-provided objective function "
"must return a scalar value."
) from e
if fx < self._lowest_f:
self._lowest_x = x
self._lowest_f = fx
return fx
def update_fun():
self.f = fun_wrapped(self.x)
self._update_fun_impl = update_fun
self._update_fun()
# Gradient evaluation
if callable(grad):
def grad_wrapped(x):
self.ngev += 1
return np.atleast_1d(grad(np.copy(x), *args))
def update_grad():
self.g = grad_wrapped(self.x)
elif grad in FD_METHODS:
def update_grad():
self._update_fun()
self.ngev += 1
self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
**finite_diff_options)
self._update_grad_impl = update_grad
self._update_grad()
# Hessian Evaluation
if callable(hess):
self.H = hess(np.copy(x0), *args)
self.H_updated = True
self.nhev += 1
if sps.issparse(self.H):
def hess_wrapped(x):
self.nhev += 1
return sps.csr_matrix(hess(np.copy(x), *args))
self.H = sps.csr_matrix(self.H)
elif isinstance(self.H, LinearOperator):
def hess_wrapped(x):
self.nhev += 1
return hess(np.copy(x), *args)
else:
def hess_wrapped(x):
self.nhev += 1
return np.atleast_2d(np.asarray(hess(np.copy(x), *args)))
self.H = np.atleast_2d(np.asarray(self.H))
def update_hess():
self.H = hess_wrapped(self.x)
elif hess in FD_METHODS:
def update_hess():
self._update_grad()
self.H = approx_derivative(grad_wrapped, self.x, f0=self.g,
**finite_diff_options)
return self.H
update_hess()
self.H_updated = True
elif isinstance(hess, HessianUpdateStrategy):
self.H = hess
self.H.initialize(self.n, 'hess')
self.H_updated = True
self.x_prev = None
self.g_prev = None
def update_hess():
self._update_grad()
self.H.update(self.x - self.x_prev, self.g - self.g_prev)
self._update_hess_impl = update_hess
if isinstance(hess, HessianUpdateStrategy):
def update_x(x):
self._update_grad()
self.x_prev = self.x
self.g_prev = self.g
# ensure that self.x is a copy of x. Don't store a reference
# otherwise the memoization doesn't work properly.
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
self.g_updated = False
self.H_updated = False
self._update_hess()
else:
def update_x(x):
# ensure that self.x is a copy of x. Don't store a reference
# otherwise the memoization doesn't work properly.
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
self.g_updated = False
self.H_updated = False
self._update_x_impl = update_x
def _update_fun(self):
if not self.f_updated:
self._update_fun_impl()
self.f_updated = True
def _update_grad(self):
if not self.g_updated:
self._update_grad_impl()
self.g_updated = True
def _update_hess(self):
if not self.H_updated:
self._update_hess_impl()
self.H_updated = True
def fun(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_fun()
return self.f
def grad(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_grad()
return self.g
def hess(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_hess()
return self.H
def fun_and_grad(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_fun()
self._update_grad()
return self.f, self.g
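# --- Added sketch (not in the original module): demonstrates the
# ScalarFunction memoization contract. The quadratic objective and the
# None-returning dummy hess (mirroring _prepare_scalar_function's
# placeholder) are assumptions chosen for illustration. ---
def _scalar_function_memo_demo():
    x0 = np.array([1.0, 2.0])
    sf = ScalarFunction(lambda x: float(np.sum(x * x)), x0, (), '2-point',
                        lambda x, *args: None, None, (-np.inf, np.inf))
    # fun_and_grad at the cached x reuses stored values; nfev only counts
    # evaluations that were actually performed.
    f, g = sf.fun_and_grad(x0)
    return f, g, sf.nfev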
def _clip_x_for_func(func, bounds):
# ensures that x values sent to func are clipped to bounds
# this is used as a mitigation for gh11403, slsqp/tnc sometimes
# suggest a move that is outside the limits by 1 or 2 ULP. This
# unclean fix makes sure x is strictly within bounds.
def eval(x):
x = _check_clip_x(x, bounds)
return func(x)
return eval
def _check_clip_x(x, bounds):
if (x < bounds[0]).any() or (x > bounds[1]).any():
return np.clip(x, bounds[0], bounds[1])
return x
def _arr_to_scalar(x):
# If x is a numpy array, return x.item(). This will
# fail if the array has more than one element.
return x.item() if isinstance(x, np.ndarray) else x
def _eval_constraint(x, cons):
# Compute constraints
if cons['eq']:
c_eq = np.concatenate(
[np.atleast_1d(con['fun'](x, *con['args'])) for con in cons['eq']]
)
else:
c_eq = np.zeros(0)
if cons['ineq']:
c_ieq = np.concatenate(
[np.atleast_1d(con['fun'](x, *con['args'])) for con in cons['ineq']]
)
else:
c_ieq = np.zeros(0)
# Now combine c_eq and c_ieq into a single matrix
c = np.concatenate((c_eq, c_ieq))
return c
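# --- Added demo (assumption: minimal constraint dicts, for illustration
# only) --- shows the normalized `cons` layout _eval_constraint consumes:
# each entry provides 'fun' and 'args'; outputs are concatenated eq-first.
def _eval_constraint_demo():
    cons = {
        'eq': [{'fun': lambda x: np.atleast_1d(x[0] + x[1] - 1.0), 'args': ()}],
        'ineq': [{'fun': lambda x: np.atleast_1d(x[0]), 'args': ()}],
    }
    return _eval_constraint(np.array([0.25, 0.75]), cons)  # -> array([0., 0.25])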
def _eval_con_normals(x, cons, la, n, m, meq, mieq):
# Compute the normals of the constraints
if cons['eq']:
a_eq = np.vstack(
[con['jac'](x, *con['args']) for con in cons['eq']]
)
else: # no equality constraint
a_eq = np.zeros((meq, n))
if cons['ineq']:
a_ieq = np.vstack(
[con['jac'](x, *con['args']) for con in cons['ineq']]
)
else: # no inequality constraint
a_ieq = np.zeros((mieq, n))
# Now combine a_eq and a_ieq into a single a matrix
if m == 0: # no constraints
a = np.zeros((la, n))
import math
import numpy as np
from levinson_durbin import LevinsonDurbin
class SDAR_1Dim(object):
def __init__(self, r, order):
self._r = r
self._mu = np.random.random()
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / lambda |v|^2
# omega = 2 pi / lambda
# |v| = 1 / n
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
for _ in range(10):
# Check nrandom
rays = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
lx=1.0, nx=1,
nrandom=1000, dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import albumentations as A
from pathlib import Path
import torch
from torch import nn
from src_backup.cdan import get_model
from src.backbone.iresnet import get_arcface_backbone
class MyModel(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.layers = [backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4]
def forward(self, x):
activations = []
x = self.backbone.prelu(self.backbone.bn1(self.backbone.conv1(x)))
for layer in self.layers:
x = layer(x)
activations.append(x)
return activations
def get_best_model(mode='arcface', base_path='log/best_weight/{}.pth'):
model_path_dict = {'BSP': 'FACE_CDAN_BSP_BOTH', 'DAN': 'FACE_DAN_BOTH',
'BOTH': 'FACE_BOTH', 'FACE': 'FACE'}
backbone = get_arcface_backbone('cpu')
if mode != 'arcface':
backbone = get_model(backbone, fc_dim=512, embed_dim=512, nclass=460, hidden_dim=1024,
pretrained_path=base_path.format(model_path_dict[mode])).backbone
backbone.eval()
return MyModel(backbone)
def img_preprocessing(img):
transforms = A.Compose([
A.SmallestMaxSize(112),
A.CenterCrop(112, 112, p=1),
])
img = ((np.transpose(transforms(image=np.array(img))['image'], (2, 0, 1)) / 255) - 0.5) / 0.5
return img
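# Added note: the arithmetic above maps uint8 pixels to [-1, 1] via
# ((x / 255) - 0.5) / 0.5, i.e. mean 0.5 / std 0.5 normalization, which
# ArcFace-style backbones conventionally expect (stated as context, not
# taken from the original source).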
def activation_based_map_f(activations):
attention_map = []
for activation in activations:
img = activation.pow(2).mean(1).detach().numpy()[0, :, :, np.newaxis]
resized_img = A.Resize(112, 112, 4)(image=img)['image']
attention_map.append((resized_img, img))
return attention_map
def show_example(img_path='iu_mask.jpg', mode='arcface', show=True):
img = Image.open(img_path)
img_resized = A.Resize(112, 112)(image=np.array(img))['image']
img_np = np.array(img)
import h5py
import os
import re
import numpy as np
import glob
import json
import os
def dumper(obj):
try:
return obj.toJSON()
except:
return obj.tolist()
def read_avg(filepath):
with open(filepath, encoding="utf8", errors='ignore') as f:
lines = [line for line in f.readlines() if line != '\n']
# print(filepath)
AXESDICT = {}
PROPERTIES = {}
DATA = {}
DATA['I'] = []
i=0
while i < len(lines)-1:
if 'Dump of DataSpace' in lines[i]:
PROPERTIES.update({'VGD_Location':lines[i].split(';Dump of DataSpace')[1].strip().replace("'",'')})
i+=1
elif (lines[i][0] != ';') and (lines[i][0] != '\n'):
if '$PROPERTIES' in lines[i]:
i+=1
while (lines[i][0] !='$'):
if lines[i][0] != ';':
props = [k.strip() for k in re.split('=|:',lines[i].strip())]
if props[1] =='VT_BSTR':
if props[0] == 'DS_EXT_SUPROPID_COMMENTS':
comments = props[2].replace("'","")
i+=1
while lines[i][0] == ' ':
comments+=lines[i].replace("'","")
i+=1
PROPERTIES.update({props[0].split(':')[0].strip():comments})
else:
PROPERTIES.update({props[0].split(':')[0].strip():props[2].replace("'","")})
i+=1
elif props[1] =='VT_DATE':
props = [k.strip() for k in re.split('=',lines[i].strip())]
PROPERTIES.update({props[0].split(':')[0].strip():props[1]})
i+=1
elif (props[1] =='VT_I4') or (props[1] =='VT_I2'):
PROPERTIES.update({props[0].split(':')[0].strip():int(props[2])})
i+=1
elif props[1] == 'VT_BOOL':
PROPERTIES.update({props[0].split(':')[0].strip():bool(props[2])})
i+=1
elif (props[1] =='VT_R4'):
PROPERTIES.update({props[0].split(':')[0].strip():np.float(props[2])})
i+=1
else:
PROPERTIES.update({props[0].split(':')[0].strip():props[2]})
print(props[1], 'is not a specified data format')
i+=1
else:
i+=1
elif '$SPACEAXES' in lines[i]:
SPACEAXES = {}
spaxes_pars = [j for j in [ k.strip() for k in re.split(',|;|=', lines[i-1].strip())] if j !=''] #Get SPACEAXES parameter names
i+=1
while (lines[i][0] !='$'):
if lines[i][0] != ';':
spax_vals = [re.split(',|=',lines[i].strip())[k].strip() for k in range(len(spaxes_pars))] #Get space axis parameter values
SPACEAXES.update({spax_vals[0]:{spaxes_pars[k]:spax_vals[k] for k in range(1,len(spax_vals))}}) #Organize into dictionary with each axis as a key
i+=1
elif '$AXISVALUE' in lines[i]:
AXVAL = [x.strip() for x in re.split(r'\b$AXISVALUE\b|\bDATAXIS\b|\bSPACEAXIS\b|\bLABEL\b|\bPOINT\b|\bVALUE\b|=|;',lines[i].strip()) if x.strip() not in ['$AXISVALUE','']]
try:
if (eval(AXVAL[2]) == 'Etch Time') or (eval(AXVAL[2]) == 'Etch Level'):
AXESDICT[eval(AXVAL[2])].append(np.float(AXVAL[4]))
elif eval(AXVAL[2]) == 'Position':
AXESDICT[eval(AXVAL[2])].append(eval(AXVAL[4]))
except:
if (eval(AXVAL[2]) == 'Etch Time') or (eval(AXVAL[2]) == 'Etch Level'):
AXESDICT[eval(AXVAL[2])] = []
AXESDICT[eval(AXVAL[2])].append(np.float(AXVAL[4]))
elif eval(AXVAL[2]) == 'Position':
AXESDICT[eval(AXVAL[2])] = []
AXESDICT[eval(AXVAL[2])].append(eval(AXVAL[4]))
i+=1
elif '$DATA=*' in lines[i]:
data_temp = []
i+=1
while ('LIST@' in lines[i].split()[0]):
if lines[i][0] != ';':
data_temp.extend([np.float(k.strip()) for k in lines[i].split('=')[1].split(',')])
i+=1
if i == len(lines):
break
if int(SPACEAXES['0']['numPoints']) != len(data_temp):
print(lines[i-1].split()[0])
print(int(SPACEAXES['0']['numPoints']),len(data_temp))
print('data is not the same length as the numpoints')
break
DATA['I'].append(data_temp)
else:
i+=1
else:
i+=1
start = np.float(PROPERTIES['DS_SOPROPID_ENERGY']) - np.float(SPACEAXES['0']['start'])
stop = np.float(PROPERTIES['DS_SOPROPID_ENERGY']) - np.float(SPACEAXES['0']['start']) - np.float(SPACEAXES['0']['width'])
import numpy as np
from scipy import integrate
from scipy import interpolate
# Cosmological parameters
Om0 = 0.272
Ol0 = 1.0 - Om0
h = 0.704
ns = 0.961
sigma80 = 0.807
SPEEDOFLIGHT_KMS = 2.99792458e5
def nhat(alpha, delta):
nhat = np.zeros(3)
nhat[0] = np.cos(delta) * np.cos(alpha)
nhat[1] = np.cos(delta) * np.sin(alpha)
nhat[2] = np.sin(delta)
return nhat
def angsep(alpha1, alpha2, delta1, delta2):
cos_ang = np.sin(delta1)*np.sin(delta2) + np.cos(delta1)*np.cos(delta2)*np.cos(alpha1-alpha2)
ang = np.arccos(cos_ang)
return ang
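# Added note: angsep evaluates the spherical law of cosines,
# cos(theta) = sin(d1)*sin(d2) + cos(d1)*cos(d2)*cos(a1 - a2),
# with all inputs in radians; e.g. angsep(0.0, np.pi/2, 0.0, 0.0) == np.pi/2.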
class cosmo:
def __init__(self, Om0=Om0, h=h, ns=ns, sigma80=sigma80, **kwargs):
self.Om0 = Om0
self.Ol0 = 1.0 - self.Om0
self.Ob0 = 0.045
self.Tcmb0 = 2.7255
self.h = h
self.ns = ns
self.sigma80 = sigma80
self.H0 = 100.0 # [h km/s/Mpc]
self.q0 = 0.5*self.Om0 - self.Ol0
self.gamma = 0.55 # growth index
self._As = None
self._sigmav = None
self.log_xi_perp_interpolator = None
self.xi_para_interpolator = None
# Eisenstein & Hu (1998) zero baryon transfer function parameters
ombom0 = self.Ob0 / self.Om0 # shorthand
om0h2 = self.Om0 * self.h**2
ombh2 = self.Ob0 * self.h**2
self.theta2p7 = self.Tcmb0 / 2.7
# Equation 31
alphaGamma = 1.0 - 0.328*np.log(431.0*om0h2)*ombom0 + 0.38*np.log(22.3*om0h2)*ombom0**2
# Quantities for Equation 30 (computed in transferFunction)
self.Gamma1 = self.Om0*self.h*alphaGamma
self.Gamma2 = self.Om0*self.h*(1.0-alphaGamma)
# Equation 26
self.s_EH98 = 44.5*np.log(9.83/om0h2) / np.sqrt(1.0+10.0*ombh2**0.75)
# halofit spectral parameters
self.rknl = None
self.rneff = None
self.rncur = None
@property
def dH(self):
return (SPEEDOFLIGHT_KMS)/self.H0 * 1e3 # c/H_0 [h^-1 kpc]
def E_Hub(self, z):
"""
Computes E(z) = H(z)/H0
"""
E2 = self.Om0*(1.+z)**3 + self.Ol0
if np.all(E2 > 0.0):
return np.sqrt(E2)
else:
return np.NaN
def Omega_m(self, z):
"""
Evolution of omega matter with redshift
"""
EH = self.E_Hub(z)
return self.Om0*(1.+z)**3 / EH**2
def Omega_v(self, z):
"""
Evolution of omega vacuum with redshift
"""
EH = self.E_Hub(z)
return self.Ol0 / EH**2
def chi(self, z, use_lowz=False):
"""
Computes the comoving distance in units h^-1 kpc
"""
def _integrand(z):
return 1.0/self.E_Hub(z) # 1/E(z) = H0/H(z)
if use_lowz: # if z<<1
return self.dH * (z - 0.5*(1.+self.q0)*z**2)
else:
if np.isclose(z, 0.0):
return 0.0
zp1 = z + 1.0
if np.isfinite(_integrand(z)): # prevent negative square roots
if np.isclose(self.Om0, 1.0): # EdS
return 2.*zp1*(1.-1./np.sqrt(zp1)) * self.dH
elif np.isclose(self.Ol0, 1.0): # dS
return z * self.dH
else:
y,err = integrate.quad(_integrand, 0.0, z, epsabs=1e-8)
return y * self.dH
else:
return float(1e7)
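# Added note: chi_lowz below evaluates the same low-z Taylor expansion that
# the use_lowz branch of chi uses, chi(z) ~ dH*(z - (1 + q0)*z**2/2),
# but accepts array input for z.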
def chi_lowz(self, z): # accepts array input for z
return self.dH*(z - 0.5*(1.+self.q0)*z**2)
def ztot(self, z, v=0.0):
return (1.0 + z) * (1.0 + v/SPEEDOFLIGHT_KMS) - 1.0
def kappa_v(self, z, v=0.0, use_lowz=False):
dA_bar = self.chi(z, use_lowz) / (1.+z)
dH = self.dH/self.E_Hub(z)
return (1.0 - dH/dA_bar) * (v/SPEEDOFLIGHT_KMS)
def dA(self, z, v=0.0, use_lowz=False):
"""
Computes angular diameter distance in units h^-1 kpc
"""
ret = self.chi(z, use_lowz) / (1.+z)
if v == 0.0:
ret *= 1.0
else:
ret *= 1.0 - self.kappa_v(z, v, use_lowz)
return ret
def transferFunction(self, k):
"""
The zero-baryon transfer function according to Eisenstein & Hu 1998.
This fitting function is significantly simpler than the full version
and still approximates numerical calculations from a Boltzmann code
to better than 10%, and almost as accurate when computing the variance
or correlation function (see the Colossus code paper for details).
"""
kh = k*self.h # convert kh from hMpc^-1 to Mpc^-1
# Equation 30
Gamma = self.Gamma1 + self.Gamma2 / (1.0 + (0.43*kh*self.s_EH98)**4)
# Equation 28
q = k * self.theta2p7 * self.theta2p7 / Gamma
# Equation 29
C0 = 14.2 + 731.0 / (1.0 + 62.5*q)
L0 = np.log(2.0*np.exp(1.0) + 1.8*q)
Tk = L0 / (L0 + C0*q*q)
return Tk
def growthFactor(self, z): # D(a)
return 1.0
def growthFactor_approx(self, z):
# The Carroll, Press & Turner (1992) approximation, eq. 29 for g(Omega)=D/a
om_m = self.Omega_m(z)
om_v = self.Omega_v(z)
g = 2.5*om_m/(om_m**(4./7.)-om_v+(1.+om_m/2.)*(1.+om_v/70.))
g0 = 2.5*self.Om0/(self.Om0**(4./7.)-self.Ol0+(1.+self.Om0/2.)*(1.+self.Ol0/70.))
return g/g0/(1.+z) # D
def matterPowerSpectrum(self, k, z=0.0):
"""
The (linear) matter power spectrum at scale k
k has units h/Mpc so P(k) has units of [k^-3] i.e. (Mpc/h)^3
"""
T = self.transferFunction(k)
D = self.growthFactor(z)
Pk = self.As * D * D * T * T * k**self.ns
return Pk
def Delta2_L(self, k, z=0.0):
"""
Linear dimensionless matter power spectrum
"""
return k**3 * self.matterPowerSpectrum(k,z) / (2.*np.pi**2)
@property
def As(self): # scalar amplitude A_s of matter power spectrum
if self._As is None:
sigma80_int = self._sigmaExact()
self._As = (self.sigma80 / sigma80_int)**2
return self._As
def _sigmaExact(self):
"""
This computes the integral of sqrt[(sigma_80)^2 / A_s].
The infinite integral over k often causes trouble when the tophat filter is used.
Thus we determine sensible limits and integrate over a finite k-volume.
"""
def _integrand(lnk):
k = np.exp(lnk)
x = k * 8.0
if x < 1e-3:
W = 1.0
else:
W = 3.0 / x**3 * (np.sin(x) - x * np.cos(x)) # FT of tophat filter
T = self.transferFunction(k)
P_unnorm = T * T * k**self.ns # equal to P(k)/A_s
ret = P_unnorm * W**2 * k**3 # one factor of k due to the integration in log-k space
return ret
lnk_min, lnk_max = self._get_lnk_limits(_integrand)
sigma2, _ = integrate.quad(_integrand, lnk_min, lnk_max, epsabs=1e-9, limit=100)
sigma = np.sqrt(sigma2 / 2.0 / np.pi**2)
if np.isnan(sigma):
# raise Exception("Result is nan")
print('sigma integral is NaN')
print('with parameters Om0={}, sigma8={}'.format(self.Om0,self.sigma80))
return sigma
def _sep(self, coord_obj1, coord_obj2, use_lowz=False):
"""
Computes the comoving seperation between two points and
the angles made by the two lines of sight and the connecting
line.
Parameters
-------------------------------------------------------
coord_obj1: array-like e.g. 3-tuple (z,RA,DEC)
coord_obj2: array-like e.g. 3-tuple (z,RA,DEC)
The angular coordinates RA and DEC are in degrees.
Returns
-------------------------------------------------------
(r,theta1,theta2): 3-tuple
r is the comoving seperation (Mpc/h)
theta1(2) in radians is the seperation angle between the
LOS of object 1(2) and the connecting line.
Notes
-------------------------------------------------------
rhat is directed from point 1 to point 2
"""
deg2rad = np.pi/180
z1, RA1, DEC1 = coord_obj1
z2, RA2, DEC2 = coord_obj2
alpha1 = RA1 * deg2rad
alpha2 = RA2 * deg2rad
delta1 = DEC1 * deg2rad
delta2 = DEC2 * deg2rad
nhat1 = nhat(alpha1, delta1)
nhat2 = nhat(alpha2, delta2)
xvec1 = self.chi(z1, use_lowz) * 1e-3 * nhat1 # since chi in kpc/h and want Mpc/h
xvec2 = self.chi(z2, use_lowz) * 1e-3 * nhat2
rvec = xvec2 - xvec1
r = np.sqrt(np.dot(rvec,rvec))
if r < 1e-14:
theta1 = np.pi/2
theta2 = np.pi/2
else:
rhat = rvec/r
theta1 = np.arccos(np.dot(rhat,nhat1))
theta2 = np.arccos(np.dot(rhat,nhat2))
return r, theta1, theta2 # units radians and Mpc/h
def xiV_perp(self, r):
def _integrand_perp(lnk, r):
k = np.exp(lnk)
Pk = self.matterPowerSpectrum(k)
x = k * r
if x < 1e-3:
Kperp = 1/3.
else:
j1 = np.sin(x)/x**2 - np.cos(x)/x
Kperp = j1/x
ret = k * Pk * Kperp
ret *= (self.H0 * self.Om0**self.gamma)**2 / (2*np.pi**2)
return ret
if self.log_xi_perp_interpolator is not None:
ret = 10**self.log_xi_perp_interpolator(r)
else:
kwargs = {'epsabs':1e-8, 'limit':100}
lnk_min = -8
if r > 0.0:
lnk_max = min(3, np.log(26.6661/r)) # 8th +ve root of Kperp
else:
lnk_max = 3
ret, _ = integrate.quad(_integrand_perp, lnk_min, lnk_max, args=(r,), **kwargs)
return ret
def xiV_para(self, r):
def _integrand_para(lnk, r):
k = np.exp(lnk)
Pk = self.matterPowerSpectrum(k)
x = k * r
if x < 1e-3:
Kpara = 1/3.
else:
j0 = np.sin(x)/x
j1 = np.sin(x)/x**2 - np.cos(x)/x
Kpara = j0 - 2.*j1/x
ret = k * Pk * Kpara
ret *= (self.H0 * self.Om0**self.gamma)**2 / (2*np.pi**2)
return ret
if self.xi_para_interpolator is not None:
ret = self.xi_para_interpolator(r)
else:
kwargs = {'epsabs':1e-8, 'limit':100}
lnk_min = -8
if r > 0.0:
lnk_max = min(3, np.log(25.0528/r)) # 8th +ve root of Kpara
else:
lnk_max = 3
ret, _ = integrate.quad(_integrand_para, lnk_min, lnk_max, args=(r,), **kwargs)
return ret
def init_xiV_interpolation(self, rmax=400.0, Nperp=30, Npara=70, use_deriv=False):
"""
Notes
-------------------------------------------------------
To minimise number of calls to xiV_perp we note that it is a
strictly positive function and when transformed to logspace
is close to linear which is why we use a smaller number of
sampling points. We thus interpolate this function in logspace.
xiV_para crosses zero so we interpolate as normal.
"""
self.log_xi_perp_interpolator = None
self.xi_para_interpolator = None
self.dlog_xi_perp_interpolator = None
r_perp = np.linspace(0, rmax, Nperp)
xi_perp = np.array([self.xiV_perp(r) for r in r_perp])
if use_deriv:
# setting s=0 interpolates all points
self.log_xi_perp_interpolator = interpolate.UnivariateSpline(r_perp, np.log10(xi_perp), s=0, k=3)
self.dlog_xi_perp_interpolator = self.log_xi_perp_interpolator.derivative()
else:
r_para = np.linspace(0, rmax, Npara)
xi_para = np.array([self.xiV_para(r) for r in r_para])
self.log_xi_perp_interpolator = interpolate.interp1d(r_perp, np.log10(xi_perp))
self.xi_para_interpolator = interpolate.interp1d(r_para, xi_para)
def xiV(self, coord_obj1, coord_obj2, use_interpolation=False, use_lowz=False, use_deriv=False):
"""
The velocity correlation function for two objects seperated by r in
units Mpc/h. The two angles are the angular seperations made by the
LOS (x2) and connecting line between each object. We assume no
evolution in the power spectrum and hence correlation function.
Parameters
-------------------------------------------------------
coord_obj1: array-like e.g. 3-tuple (z,RA,DEC)
coord_obj2: array-like e.g. 3-tuple (z,RA,DEC)
The angular coordinates RA and DEC are in degrees.
use_interpolation: bool
If is true interpolate perp and para correlation functions
as function of seperation r
use_lowz: bool
If is true evaluate distances using the low-z Taylor approximation.
use_deriv: bool
If is true evaluate xi_para using that xi_para = d(r * xi_perp)/dr
Returns
-------------------------------------------------------
xi_V: float
the velocity correlation in units (km/s)^2
"""
r, theta1, theta2 = self._sep(coord_obj1, coord_obj2, use_lowz)
if use_interpolation:
if self.log_xi_perp_interpolator is None:
self.init_xiV_interpolation()
xi_perp = 10**self.log_xi_perp_interpolator(r)
if use_deriv:
xi_para = xi_perp * (1. + np.log(10.)*r*self.dlog_xi_perp_interpolator(r))
else:
xi_para = self.xi_para_interpolator(r)
else:
if r < 1e-14: # points very close together so compute autocorrelation
xi_perp = self.xiV_perp(r)
return xi_perp # equal to xi_para which is equal to xi_v(r=0)
else:
xi_perp = self.xiV_perp(r)
xi_para = self.xiV_para(r)
ret = np.sin(theta1)*np.sin(theta2)*xi_perp + np.cos(theta1)*np.cos(theta2)*xi_para
return ret
@property
def sigmav(self): # 1D velocity dispersion at z=0 in km/s
if self._sigmav is None:
coord = (1e-10, 1.0, 1.0)
sigmav2 = self.xiV(coord, coord)
self._sigmav = np.sqrt(sigmav2)
return self._sigmav
def xiV_correlation(self, coord_obj1, coord_obj2):
xiV_11 = self.xiV(coord_obj1, coord_obj1)
xiV_22 = self.xiV(coord_obj2, coord_obj2)
xiV_12 = self.xiV(coord_obj1, coord_obj2)
rho = xiV_12 / np.sqrt(xiV_11 * xiV_22)
return rho
@staticmethod
def _get_lnk_limits(FCN_integrand, test_k_min=1e-20, test_k_max=1e20):
"""
The integration limits are determined by demanding that the
integrand is some factor 1e-6 smaller than at its maximum.
This method should be called when performing Bessel integrals.
"""
test_integrand_min = 1e-6
test_lnk_min = np.log(test_k_min * 1.0001)
test_lnk_max = np.log(test_k_max * 0.9999)
test_lnk = np.arange(test_lnk_min, test_lnk_max, 2.0) # array of ln(k)'s
test_k_integrand = np.zeros_like(test_lnk)
n_test = len(test_lnk)
for i in range(n_test):
test_k_integrand[i] = FCN_integrand(test_lnk[i])
integrand_max = np.max(test_k_integrand)
min_index = 0
while test_k_integrand[min_index] < integrand_max * test_integrand_min:
min_index += 1
if min_index > n_test - 2:
raise Exception("Could not find lower integration limit")
lnk_min = test_lnk[min_index]
min_index -= 1
max_index = min_index + 1
while test_k_integrand[max_index] > integrand_max * test_integrand_min:
max_index += 1
if max_index == n_test:
raise Exception("Could not find upper integration limit")
lnk_max = test_lnk[max_index]
return lnk_min, lnk_max
def _get_halofit_spectral_pars(self): # Halofit
"""
Computes
rknl: wavenumber where nonlinearity begins (S03 eqn C6)
rneff: effective spectral index (S03 eqn C7)
rncur: second derivative of the power spectrum at rknl (S03 eqn C8)
taken from Smith and Peacock halofit fortran code
see https://www.roe.ac.uk/~jap/haloes/
"""
if any(p is None for p in [self.rknl, self.rneff, self.rncur]):
xlogr1 = -2.0
xlogr2 = 3.5
not_converged = True
while not_converged:
rmid = 10**((xlogr2+xlogr1)/2.)
sig,d1,d2 = self.wint(rmid)
diff = sig - 1.0
if diff > 0.001:
xlogr1 = np.log10(rmid)
not_converged = True
elif diff < -0.001:
xlogr2 = np.log10(rmid)
not_converged = True
else:
self.rknl = 1./rmid
self.rneff = -3-d1
self.rncur = -d2
not_converged = False
else:
pass
def wint(self, r): # Halofit
"""
The subroutine wint, finds the effective spectral quantities
rknl, rneff & rncur. This it does by calculating the radius of
the Gaussian filter at which the variance is unity = rknl.
rneff is defined as the first derivative of the variance, calculated
at the nonlinear wavenumber and similarly the rncur is the second
derivative at the nonlinear wavenumber.
Taken from Smith and Peacock halofit fortran code
see https://www.roe.ac.uk/~jap/haloes/
"""
nint = 3000
t = (np.arange(nint)+0.5)/nint
y = 1./t - 1.
rk = y
d2 = self.Delta2_L(rk)
x2 = y*y*r*r
w1 = np.exp(-x2)
w2 = 2*x2*w1
w3 = 4*x2*(1-x2)*w1
fn = d2/y/t/t
sum1 = np.sum(w1*fn)/nint
sum2 = np.sum(w2*fn)/nint
sum3 = np.sum(w3*fn)/nint
sig = np.sqrt(sum1)
d1 = -sum2/sum1
d2 = -sum2*sum2/sum1/sum1 - sum3/sum1
return sig, d1, d2
def _Delta2_NL_S03(self, k, z=0.0): # Halofit Smith+ 2003 original
self._get_halofit_spectral_pars()
rn = self.rneff
rncur = self.rncur
rknl = self.rknl
gam = 0.86485 + 0.2989*rn + 0.1631*rncur
a = 10**(1.4861 + 1.83693*rn + 1.67618*rn*rn + 0.7940*rn*rn*rn \
+ 0.1670756*rn*rn*rn*rn - 0.620695*rncur)
b = 10**(0.9463 + 0.9466*rn + 0.3084*rn*rn - 0.940*rncur)
c = 10**(-0.2807 + 0.6669*rn + 0.3214*rn*rn - 0.0793*rncur)
xmu = 10**(-3.54419 + 0.19086*rn)
xnu = 10**(0.95897 + 1.2857*rn)
alpha = 1.38848 + 0.3701*rn - 0.1452*rn*rn
beta = 0.8291 + 0.9854*rn + 0.3400*rn**2
om_m = self.Omega_m(z)
om_v = self.Omega_v(z)
if abs(1-om_m) > 0.01: # omega evolution
f1a = om_m**(-0.0732)
f2a = om_m**(-0.1423)
f3a = om_m**(0.0725)
f1b = om_m**(-0.0307)
f2b = om_m**(-0.0585)
f3b = om_m**(0.0743)
frac = om_v/(1.-om_m)
f1 = frac*f1b + (1-frac)*f1a
f2 = frac*f2b + (1-frac)*f2a
f3 = frac*f3b + (1-frac)*f3a
else:
f1 = 1.0
f2 = 1.0
f3 = 1.0
y = (k/rknl)
plin = self.Delta2_L(k,z)
ph = a*y**(f1*3) / (1+b*y**(f2)+(f3*c*y)**(3-gam))
ph /= (1+xmu*y**(-1)+xnu*y**(-2))
pq = plin * (1+plin)**beta/(1+plin*alpha) * np.exp(-y/4.0-y**2/8.0)
import logging
import pickle
import random
from collections import Counter
from itertools import chain, permutations
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from snorkel.analysis import Scorer
from snorkel.labeling.analysis import LFAnalysis
from snorkel.labeling.model.graph_utils import get_clique_tree
from snorkel.labeling.model.logger import Logger
from snorkel.types import Config
from snorkel.utils import probs_to_preds
from snorkel.utils.config_utils import merge_config
from snorkel.utils.lr_schedulers import LRSchedulerConfig
from snorkel.utils.optimizers import OptimizerConfig
Metrics = Dict[str, float]
class TrainConfig(Config):
"""Settings for the fit() method of LabelModel.
Parameters
----------
n_epochs
The number of epochs to train (where each epoch is a single optimization step)
lr
Base learning rate (will also be affected by lr_scheduler choice and settings)
l2
Centered L2 regularization strength
optimizer
Which optimizer to use (one of ["sgd", "adam", "adamax"])
optimizer_config
Settings for the optimizer
lr_scheduler
Which lr_scheduler to use (one of ["constant", "linear", "exponential", "step"])
lr_scheduler_config
Settings for the LRScheduler
prec_init
LF precision initializations / priors
seed
A random seed to initialize the random number generator with
log_freq
Report loss every this many epochs (steps)
mu_eps
Restrict the learned conditional probabilities to [mu_eps, 1-mu_eps]
"""
n_epochs: int = 100
lr: float = 0.01
l2: float = 0.0
optimizer: str = "sgd"
optimizer_config: OptimizerConfig = OptimizerConfig() # type: ignore
lr_scheduler: str = "constant"
lr_scheduler_config: LRSchedulerConfig = LRSchedulerConfig() # type: ignore
prec_init: float = 0.7
seed: int = np.random.randint(1e6)
log_freq: int = 10
mu_eps: Optional[float] = None
class LabelModelConfig(Config):
"""Settings for the LabelModel initialization.
Parameters
----------
verbose
Whether to include print statements
device
What device to place the model on ('cpu' or 'cuda:0', for example)
"""
verbose: bool = True
device: str = "cpu"
class _CliqueData(NamedTuple):
start_index: int
end_index: int
max_cliques: Set[int]
class LabelModel(nn.Module):
r"""A model for learning the LF accuracies and combining their output labels.
This class learns a model of the labeling functions' conditional probabilities
of outputting the true (unobserved) label `Y`, `P(\lf | Y)`, and uses this learned
model to re-weight and combine their output labels.
This class is based on the approach in [Training Complex Models with Multi-Task
Weak Supervision](https://arxiv.org/abs/1810.02840), published in AAAI'19. In this
approach, we compute the inverse generalized covariance matrix of the junction tree
of a given LF dependency graph, and perform a matrix completion-style approach with
respect to these empirical statistics. The result is an estimate of the conditional
LF probabilities, `P(\lf | Y)`, which are then set as the parameters of the label
model used to re-weight and combine the labels output by the LFs.
Currently this class uses a conditionally independent label model, in which the LFs
are assumed to be conditionally independent given `Y`.
Examples
--------
>>> label_model = LabelModel()
>>> label_model = LabelModel(cardinality=3)
>>> label_model = LabelModel(cardinality=3, device='cpu')
>>> label_model = LabelModel(cardinality=3)
Parameters
----------
cardinality
Number of classes, by default 2
**kwargs
Arguments for changing config defaults
Raises
------
ValueError
If config device set to cuda but only cpu is available
Attributes
----------
cardinality
Number of classes, by default 2
config
Training configuration
seed
Random seed
"""
def __init__(self, cardinality: int = 2, **kwargs: Any) -> None:
super().__init__()
self.config: LabelModelConfig = LabelModelConfig(**kwargs)
self.cardinality = cardinality
# Confirm that cuda is available if config is using CUDA
if self.config.device != "cpu" and not torch.cuda.is_available():
raise ValueError("device=cuda but CUDA not available.")
# By default, put model in eval mode; switch to train mode in training
self.eval()
def _create_L_ind(self, L: np.ndarray) -> np.ndarray:
"""Convert a label matrix with labels in 0...k to a one-hot format.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
Returns
-------
np.ndarray
An [n,m*k] dense np.ndarray with values in {0,1}
"""
L_ind = np.zeros((self.n, self.m * self.cardinality))
for y in range(1, self.cardinality + 1):
# A[x::y] slices A starting at x at intervals of y
# e.g., np.arange(9)[0::3] == np.array([0,3,6])
L_ind[:, (y - 1) :: self.cardinality] = np.where(L == y, 1, 0)
return L_ind
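# Added illustration (values are assumptions): with cardinality=2 and
#     L = np.array([[1, 2],
#                   [2, 0]])
# the one-hot expansion is
#     [[1., 0., 0., 1.],
#      [0., 1., 0., 0.]]
# with columns ordered [lf0==1, lf0==2, lf1==1, lf1==2]; label 0 (abstain)
# leaves all of an LF's columns zero.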
def _get_augmented_label_matrix(
self, L: np.ndarray, higher_order: bool = False
) -> np.ndarray:
"""Create augmented version of label matrix.
In augmented version, each column is an indicator
for whether a certain source or clique of sources voted in a certain
pattern.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
higher_order
Whether to include higher-order correlations (e.g. LF pairs) in matrix
Returns
-------
np.ndarray
An [n,m*k] dense matrix with values in {0,1}
"""
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
self.c_data: Dict[int, _CliqueData] = {}
for i in range(self.m):
self.c_data[i] = _CliqueData(
start_index=i * self.cardinality,
end_index=(i + 1) * self.cardinality,
max_cliques=set(
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.node[j]["members"]
]
),
)
L_ind = self._create_L_ind(L)
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if higher_order:
L_aug = np.copy(L_ind)
for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.node[item]
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
else:
raise ValueError(item)
members = list(C["members"])
# With unary maximal clique, just store its existing index
C["start_index"] = members[0] * self.cardinality
C["end_index"] = (members[0] + 1) * self.cardinality
return L_aug
else:
return L_ind
def _build_mask(self) -> None:
"""Build mask applied to O^{-1}, O for the matrix approx constraint."""
self.mask = torch.ones(self.d, self.d).byte()
for ci in self.c_data.values():
si = ci.start_index
ei = ci.end_index
for cj in self.c_data.values():
sj, ej = cj.start_index, cj.end_index
# Check if ci and cj are part of the same maximal clique
# If so, mask out their corresponding blocks in O^{-1}
if len(ci.max_cliques.intersection(cj.max_cliques)) > 0:
self.mask[si:ei, sj:ej] = 0
self.mask[sj:ej, si:ei] = 0
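# Added illustration (default conditionally independent setting assumed):
# each LF forms its own maximal clique, so only the diagonal cardinality x
# cardinality blocks of `mask` are zeroed, and every off-diagonal block of
# O^{-1} remains constrained to zero during the matrix completion step.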
def _generate_O(self, L: np.ndarray, higher_order: bool = False) -> None:
"""Generate overlaps and conflicts matrix from label matrix.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
higher_order
Whether to include higher-order correlations (e.g. LF pairs) in matrix
"""
L_aug = self._get_augmented_label_matrix(L, higher_order=higher_order)
self.d = L_aug.shape[1]
self.O = (
torch.from_numpy(L_aug.T @ L_aug / self.n).float().to(self.config.device)
)
def _init_params(self) -> None:
r"""Initialize the learned params.
- \mu is the primary learned parameter, where each row corresponds to
the probability of a clique C emitting a specific combination of labels,
conditioned on different values of Y (for each column); that is:
self.mu[i*self.cardinality + j, y] = P(\lambda_i = j | Y = y)
and similarly for higher-order cliques.
Raises
------
ValueError
If prec_init shape does not match number of LFs
"""
# Initialize mu so as to break basic reflective symmetry
# Note that we are given either a single or per-LF initial precision
# value, prec_i = P(Y=y|\lf=y), and use:
# mu_init = P(\lf=y|Y=y) = P(\lf=y) * prec_i / P(Y=y)
# Handle single values
if isinstance(self.train_config.prec_init, (int, float)):
self._prec_init = self.train_config.prec_init * torch.ones(self.m)
if self._prec_init.shape[0] != self.m:
raise ValueError(f"prec_init must have shape {self.m}.")
# Get the per-value labeling propensities
# Note that self.O must have been computed already!
lps = torch.diag(self.O).cpu().detach().numpy()
# TODO: Update for higher-order cliques!
self.mu_init = torch.zeros(self.d, self.cardinality)
for i in range(self.m):
for y in range(self.cardinality):
idx = i * self.cardinality + y
mu_init = torch.clamp(lps[idx] * self._prec_init[i] / self.p[y], 0, 1)
self.mu_init[idx, y] += mu_init
# Initialize randomly based on self.mu_init
self.mu = nn.Parameter(self.mu_init.clone() * np.random.random()).float()
# Build the mask over O^{-1}
self._build_mask()
def _get_conditional_probs(self, mu: np.ndarray) -> np.ndarray:
r"""Return the estimated conditional probabilities table given parameters mu.
        Given a parameter vector mu, return the estimated conditional probabilities
table cprobs, where cprobs is an (m, k+1, k)-dim np.ndarray with:
cprobs[i, j, k] = P(\lf_i = j-1 | Y = k)
where m is the number of LFs, k is the cardinality, and cprobs includes the
conditional abstain probabilities P(\lf_i = -1 | Y = y).
Parameters
----------
mu
An [m * k, k] np.ndarray with entries in [0, 1]
Returns
-------
np.ndarray
An [m, k + 1, k] np.ndarray conditional probabilities table.
"""
cprobs = np.zeros((self.m, self.cardinality + 1, self.cardinality))
for i in range(self.m):
# si = self.c_data[(i,)]['start_index']
# ei = self.c_data[(i,)]['end_index']
# mu_i = mu[si:ei, :]
mu_i = mu[i * self.cardinality : (i + 1) * self.cardinality, :]
cprobs[i, 1:, :] = mu_i
# The 0th row (corresponding to abstains) is the difference between
# the sums of the other rows and one, by law of total probability
cprobs[i, 0, :] = 1 - mu_i.sum(axis=0)
return cprobs
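    # Reading the table (restating the construction above): for LF i and true
    # label y,
    #     cprobs[i, 0, y]     == P(\lf_i = ABSTAIN (-1) | Y = y)
    #     cprobs[i, j + 1, y] == P(\lf_i = j | Y = y)   for j in 0..k-1
    # so each column cprobs[i, :, y] sums to 1 by the law of total probability.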
def get_conditional_probs(self) -> np.ndarray:
r"""Return the estimated conditional probabilities table.
        Return the estimated conditional probabilities table cprobs, where cprobs is an
(m, k+1, k)-dim np.ndarray with:
cprobs[i, j, k] = P(\lf_i = j-1 | Y = k)
where m is the number of LFs, k is the cardinality, and cprobs includes the
conditional abstain probabilities P(\lf_i = -1 | Y = y).
Returns
-------
np.ndarray
An [m, k + 1, k] np.ndarray conditional probabilities table.
"""
return self._get_conditional_probs(self.mu.cpu().detach().numpy())
def get_weights(self) -> np.ndarray:
"""Return the vector of learned LF weights for combining LFs.
Returns
-------
np.ndarray
[m,1] vector of learned LF weights for combining LFs.
Example
-------
>>> L = np.array([[1, 1, 1], [1, 1, -1], [-1, 0, 0], [0, 0, 0]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L, seed=123)
>>> np.around(label_model.get_weights(), 2) # doctest: +SKIP
array([0.99, 0.99, 0.99])
"""
accs = np.zeros(self.m)
cprobs = self.get_conditional_probs()
for i in range(self.m):
accs[i] = np.diag(cprobs[i, 1:, :] @ self.P.cpu().detach().numpy()).sum()
return np.clip(accs / self.coverage, 1e-6, 1.0)
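    # In probabilistic terms (restating the loop above): the diagonal of
    # cprobs[i, 1:, :] @ P sums P(\lf_i = y | Y = y) * P(Y = y) over y, giving
    # P(\lf_i = Y); dividing by coverage_i = P(\lf_i != ABSTAIN) yields
    # weight_i = P(\lf_i = Y | \lf_i != ABSTAIN), clipped to [1e-6, 1.0].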
def predict_proba(self, L: np.ndarray) -> np.ndarray:
r"""Return label probabilities P(Y | \lambda).
Parameters
----------
L
            An [n,m] matrix with values in {-1,0,1,...,k-1}
Returns
-------
np.ndarray
An [n,k] array of probabilistic labels
Example
-------
>>> L = np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L, seed=123)
>>> np.around(label_model.predict_proba(L), 1) # doctest: +SKIP
array([[1., 0.],
[0., 1.],
[0., 1.]])
"""
L_shift = L + 1 # convert to {0, 1, ..., k}
self._set_constants(L_shift)
L_aug = self._get_augmented_label_matrix(L_shift)
mu = self.mu.cpu().detach().numpy()
jtm = np.ones(L_aug.shape[1])
# Note: We omit abstains, effectively assuming uniform distribution here
X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.p))
Z = np.tile(X.sum(axis=1).reshape(-1, 1), self.cardinality)
return X / Z
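    # Equivalently (a sketch of the computation above; jtm is all ones, so
    # np.diag(jtm) is the identity here): treating abstains as uninformative,
    #     X[x, y] = P(Y = y) * prod_{i : L[x, i] != ABSTAIN} mu[i * k + L[x, i], y]
    # evaluated in log space as L_aug @ log(mu) + log(p), after which Z
    # renormalizes each row into a proper distribution over the k classes.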
def predict(
self,
L: np.ndarray,
return_probs: Optional[bool] = False,
tie_break_policy: str = "abstain",
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return predicted labels, with ties broken according to policy.
Policies to break ties include:
"abstain": return an abstain vote (-1)
"true-random": randomly choose among the tied options
"random": randomly choose among tied option using deterministic hash
NOTE: if tie_break_policy="true-random", repeated runs may have slightly different
results due to difference in broken ties
Parameters
----------
L
An [n,m] matrix with values in {-1,0,1,...,k-1}
return_probs
Whether to return probs along with preds
tie_break_policy
Policy to break ties when converting probabilistic labels to predictions
Returns
-------
np.ndarray
An [n,1] array of integer labels
(np.ndarray, np.ndarray)
An [n,1] array of integer labels and an [n,k] array of probabilistic labels
Example
-------
>>> L = np.array([[0, 0, -1], [1, 1, -1], [0, 0, -1]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L)
>>> label_model.predict(L)
array([0, 1, 0])
"""
Y_probs = self.predict_proba(L)
Y_p = probs_to_preds(Y_probs, tie_break_policy)
if return_probs:
return Y_p, Y_probs
return Y_p
def score(
self,
L: np.ndarray,
Y: np.ndarray,
metrics: Optional[List[str]] = ["accuracy"],
tie_break_policy: str = "abstain",
) -> Dict[str, float]:
"""Calculate one or more scores from user-specified and/or user-defined metrics.
Parameters
----------
L
An [n,m] matrix with values in {-1,0,1,...,k-1}
Y
Gold labels associated with data points in L
metrics
A list of metric names
tie_break_policy
Policy to break ties when converting probabilistic labels to predictions
Returns
-------
Dict[str, float]
A dictionary mapping metric names to metric scores
Example
-------
>>> L = np.array([[1, 1, -1], [0, 0, -1], [1, 1, -1]])
>>> label_model = LabelModel(verbose=False)
>>> label_model.fit(L)
>>> label_model.score(L, Y=np.array([1, 1, 1]))
{'accuracy': 0.6666666666666666}
>>> label_model.score(L, Y=np.array([1, 1, 1]), metrics=["f1"])
{'f1': 0.8}
"""
if tie_break_policy == "abstain": # pragma: no cover
logging.warning(
"Metrics calculated over data points with non-abstain labels only"
)
Y_pred, Y_prob = self.predict(
L, return_probs=True, tie_break_policy=tie_break_policy
)
scorer = Scorer(metrics=metrics)
results = scorer.score(Y, Y_pred, Y_prob)
return results
    # These loss functions get all their data directly from the LabelModel
    # (for better or worse). The unused *args make these compatible with the
    # Classifier._train() method, which expects loss functions to accept an input.
def _loss_l2(self, l2: float = 0) -> torch.Tensor:
r"""L2 loss centered around mu_init, scaled optionally per-source.
In other words, diagonal Tikhonov regularization,
||D(\mu-\mu_{init})||_2^2
where D is diagonal.
Parameters
----------
l2
A float or np.array representing the per-source regularization
strengths to use, by default 0
Returns
-------
torch.Tensor
L2 loss between learned mu and initial mu
"""
if isinstance(l2, (int, float)):
D = l2 * torch.eye(self.d)
else:
D = torch.diag(torch.from_numpy(l2)).type(torch.float32)
D = D.to(self.config.device)
# Note that mu is a matrix and this is the *Frobenius norm*
return torch.norm(D @ (self.mu - self.mu_init)) ** 2
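    # Usage sketch (hypothetical values): l2 may be a uniform scalar,
    #     self._loss_l2(l2=0.1)
    # or a per-parameter np.ndarray of length self.d for uneven regularization,
    #     self._loss_l2(l2=np.full(self.d, 0.1))
    # Both reduce to ||D (mu - mu_init)||_F^2 with D diagonal.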
def _loss_mu(self, l2: float = 0) -> torch.Tensor:
r"""Overall mu loss.
Parameters
----------
l2
A float or np.array representing the per-source regularization
strengths to use, by default 0
Returns
-------
torch.Tensor
Overall mu loss between learned mu and initial mu
"""
loss_1 = torch.norm((self.O - self.mu @ self.P @ self.mu.t())[self.mask]) ** 2
loss_2 = torch.norm(torch.sum(self.mu @ self.P, 1) - torch.diag(self.O)) ** 2
return loss_1 + loss_2 + self._loss_l2(l2=l2)
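    # In matrix form (restating the code above), with P = diag(p) the class
    # prior:
    #     loss_1 = || (O - mu P mu^T)[mask] ||_F^2    # masked second-moment fit
    #     loss_2 = || rowsum(mu P) - diag(O) ||_2^2   # first-moment consistency
    # plus the optional Tikhonov term tying mu to mu_init.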
def _set_class_balance(
self, class_balance: Optional[List[float]], Y_dev: np.ndarray
) -> None:
"""Set a prior for the class balance.
In order of preference:
1) Use user-provided class_balance
2) Estimate balance from Y_dev
3) Assume uniform class distribution
"""
if class_balance is not None:
self.p = np.array(class_balance)
if len(self.p) != self.cardinality:
raise ValueError(
f"class_balance has {len(self.p)} entries. Does not match LabelModel cardinality {self.cardinality}."
)
elif Y_dev is not None:
class_counts = Counter(Y_dev)
sorted_counts = np.array([v for k, v in sorted(class_counts.items())])
self.p = sorted_counts / sum(sorted_counts)
if len(self.p) != self.cardinality:
raise ValueError(
f"Y_dev has {len(self.p)} class(es). Does not match LabelModel cardinality {self.cardinality}."
)
else:
self.p = (1 / self.cardinality) * np.ones(self.cardinality)
if np.any(self.p == 0):
raise ValueError(
f"Class balance prior is 0 for class(es) {np.where(self.p)[0]}."
)
self.P = torch.diag(torch.from_numpy(self.p)).float().to(self.config.device)
def _set_constants(self, L: np.ndarray) -> None:
self.n, self.m = L.shape
if self.m < 3:
raise ValueError(f"L_train should have at least 3 labeling functions")
self.t = 1
def _create_tree(self) -> None:
nodes = range(self.m)
self.c_tree = get_clique_tree(nodes, [])
def _execute_logging(self, loss: torch.Tensor) -> Metrics:
self.eval()
self.running_examples: int
self.running_loss: float
self.running_loss += loss.item()
self.running_examples += 1
# Always add average loss
metrics_dict = {"train/loss": self.running_loss / self.running_examples}
if self.logger.check():
if self.config.verbose:
self.logger.log(metrics_dict)
# Reset running loss and examples counts
self.running_loss = 0.0
self.running_examples = 0
self.train()
return metrics_dict
def _set_logger(self) -> None:
self.logger = Logger(self.train_config.log_freq)
def _set_optimizer(self) -> None:
parameters = filter(lambda p: p.requires_grad, self.parameters())
optimizer_config = self.train_config.optimizer_config
optimizer_name = self.train_config.optimizer
optimizer: optim.Optimizer # type: ignore
if optimizer_name == "sgd":
optimizer = optim.SGD( # type: ignore
parameters,
lr=self.train_config.lr,
weight_decay=self.train_config.l2,
**optimizer_config.sgd_config._asdict(),
)
elif optimizer_name == "adam":
optimizer = optim.Adam(
parameters,
lr=self.train_config.lr,
weight_decay=self.train_config.l2,
**optimizer_config.adam_config._asdict(),
)
elif optimizer_name == "adamax":
optimizer = optim.Adamax( # type: ignore
parameters,
lr=self.train_config.lr,
weight_decay=self.train_config.l2,
**optimizer_config.adamax_config._asdict(),
)
else:
raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")
self.optimizer = optimizer
def _set_lr_scheduler(self) -> None:
# Set warmup scheduler
self._set_warmup_scheduler()
# Set lr scheduler
lr_scheduler_name = self.train_config.lr_scheduler
lr_scheduler_config = self.train_config.lr_scheduler_config
lr_scheduler: Optional[optim.lr_scheduler._LRScheduler]
if lr_scheduler_name == "constant":
lr_scheduler = None
elif lr_scheduler_name == "linear":
total_steps = self.train_config.n_epochs
linear_decay_func = lambda x: (total_steps - self.warmup_steps - x) / (
total_steps - self.warmup_steps
)
lr_scheduler = optim.lr_scheduler.LambdaLR( # type: ignore
self.optimizer, linear_decay_func
)
elif lr_scheduler_name == "exponential":
lr_scheduler = optim.lr_scheduler.ExponentialLR(
self.optimizer, **lr_scheduler_config.exponential_config._asdict()
)
elif lr_scheduler_name == "step":
lr_scheduler = optim.lr_scheduler.StepLR(
self.optimizer, **lr_scheduler_config.step_config._asdict()
)
else:
raise ValueError(f"Unrecognized lr scheduler option '{lr_scheduler_name}'")
self.lr_scheduler = lr_scheduler
def _set_warmup_scheduler(self) -> None:
warmup_scheduler: Optional[optim.lr_scheduler.LambdaLR]
if self.train_config.lr_scheduler_config.warmup_steps:
warmup_steps = self.train_config.lr_scheduler_config.warmup_steps
if warmup_steps < 0:
raise ValueError(f"warmup_steps much greater or equal than 0.")
warmup_unit = self.train_config.lr_scheduler_config.warmup_unit
if warmup_unit == "epochs":
self.warmup_steps = int(warmup_steps)
else:
raise ValueError(
"LabelModel does not support any warmup_unit other than 'epochs'."
)
linear_warmup_func = lambda x: x / self.warmup_steps
warmup_scheduler = optim.lr_scheduler.LambdaLR( # type: ignore
self.optimizer, linear_warmup_func
)
if self.config.verbose: # pragma: no cover
logging.info(f"Warmup {self.warmup_steps} steps.")
elif self.train_config.lr_scheduler_config.warmup_percentage:
warmup_percentage = self.train_config.lr_scheduler_config.warmup_percentage
self.warmup_steps = int(warmup_percentage * self.train_config.n_epochs)
linear_warmup_func = lambda x: x / self.warmup_steps
warmup_scheduler = optim.lr_scheduler.LambdaLR( # type: ignore
self.optimizer, linear_warmup_func
)
if self.config.verbose: # pragma: no cover
logging.info(f"Warmup {self.warmup_steps} steps.")
else:
warmup_scheduler = None
self.warmup_steps = 0
self.warmup_scheduler = warmup_scheduler
def _update_lr_scheduler(self, step: int) -> None:
if self.warmup_scheduler and step < self.warmup_steps:
self.warmup_scheduler.step() # type: ignore
elif self.lr_scheduler is not None:
self.lr_scheduler.step() # type: ignore
min_lr = self.train_config.lr_scheduler_config.min_lr
if min_lr and self.optimizer.param_groups[0]["lr"] < min_lr:
self.optimizer.param_groups[0]["lr"] = min_lr
def _clamp_params(self) -> None:
"""Clamp the values of the learned parameter vector.
Clamp the entries of self.mu to be in [mu_eps, 1 - mu_eps], where mu_eps is
either set by the user, or defaults to 1 / 10 ** np.ceil(np.log10(self.n)).
Note that if mu_eps is set too high, e.g. in sparse settings where LFs
mostly abstain, this will result in learning conditional probabilities all
equal to mu_eps (and/or 1 - mu_eps)! See issue #1422.
Note: Use user-provided value of mu_eps in train_config, else default to
mu_eps = 1 / 10 ** np.ceil(np.log10(self.n))
this rounding is done to make it more obvious when the parameters have been
clamped.
"""
if self.train_config.mu_eps is not None:
mu_eps = self.train_config.mu_eps
else:
            mu_eps = min(0.01, 1 / 10 ** np.ceil(np.log10(self.n)))
        self.mu.data = self.mu.clamp(mu_eps, 1 - mu_eps)
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
from torch.utils.data import Dataset
import numpy as np
import time
import os
import cv2
import sys
import utils
from datasets.scannet_scene import ScanNetScene
class PlaneDatasetSingle(Dataset):
def __init__(self, options, config, split, random=True, loadNeighborImage=False, load_semantics=False, load_boundary=False):
self.options = options
self.config = config
self.split = split
self.random = random
self.dataFolder = options.dataFolder
self.scenes = []
self.sceneImageIndices = []
self.loadClassMap()
planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
planenet_scene_ids_val = {scene_id.decode('utf-8'): True for scene_id in planenet_scene_ids_val}
with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' + split + '.txt') as f:
for line in f:
scene_id = line.strip()
if split == 'test':
## Remove scenes which are in PlaneNet's training set for fair comparison
if scene_id not in planenet_scene_ids_val:
continue
pass
scenePath = self.dataFolder + '/scans/' + scene_id
if not os.path.exists(scenePath + '/' + scene_id + '.txt') or not os.path.exists(scenePath + '/annotation/planes.npy'):
continue
scene = ScanNetScene(options, scenePath, scene_id, self.confident_labels, self.layout_labels, load_semantics=load_semantics, load_boundary=load_boundary)
self.scenes.append(scene)
self.sceneImageIndices += [[len(self.scenes) - 1, imageIndex] for imageIndex in range(len(scene.imagePaths))]
continue
pass
if random:
t = int(time.time() * 1000000)
np.random.seed(((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24))
else:
np.random.seed(0)
pass
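        # (The bit-twiddling above byte-reverses the low 32 bits of the
        # microsecond clock, e.g. 0xAABBCCDD -> 0xDDCCBBAA, so the fastest-
        # changing bytes dominate the seed value.)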
np.random.shuffle(self.sceneImageIndices)
self.invalid_indices = {}
with open(self.dataFolder + '/invalid_indices_' + split + '.txt', 'r') as f:
for line in f:
tokens = line.split(' ')
if len(tokens) == 3:
assert(int(tokens[2]) < 10000)
invalid_index = int(tokens[1]) * 10000 + int(tokens[2])
if invalid_index not in self.invalid_indices:
self.invalid_indices[invalid_index] = True
pass
pass
continue
pass
self.sceneImageIndices = [[sceneIndex, imageIndex] for sceneIndex, imageIndex in self.sceneImageIndices if (sceneIndex * 10000 + imageIndex) not in self.invalid_indices]
print('num images', len(self.sceneImageIndices))
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
self.loadNeighborImage = loadNeighborImage
return
def loadClassMap(self):
classLabelMap = {}
with open(self.dataFolder + '/scannetv2-labels.combined.tsv') as info_file:
line_index = 0
for line in info_file:
if line_index > 0:
line = line.split('\t')
key = line[1].strip()
if line[4].strip() != '':
label = int(line[4].strip())
else:
label = -1
pass
classLabelMap[key] = label
classLabelMap[key + 's'] = label
classLabelMap[key + 'es'] = label
pass
line_index += 1
continue
pass
confidentClasses = {'wall': True,
'floor': True,
'cabinet': True,
'bed': True,
'chair': False,
'sofa': False,
'table': True,
'door': True,
'window': True,
'bookshelf': False,
'picture': True,
'counter': True,
'blinds': False,
'desk': True,
'shelf': False,
'shelves': False,
'curtain': False,
'dresser': True,
'pillow': False,
'mirror': False,
'entrance': True,
'floor mat': True,
'clothes': False,
'ceiling': True,
'book': False,
'books': False,
'refridgerator': True,
'television': True,
'paper': False,
'towel': False,
'shower curtain': False,
'box': True,
'whiteboard': True,
'person': False,
'night stand': True,
'toilet': False,
'sink': False,
'lamp': False,
'bathtub': False,
'bag': False,
'otherprop': False,
'otherstructure': False,
'otherfurniture': False,
'unannotated': False,
'': False
}
self.confident_labels = {}
for name, confidence in confidentClasses.items():
if confidence and name in classLabelMap:
self.confident_labels[classLabelMap[name]] = True
pass
continue
self.layout_labels = {1: True, 2: True, 22: True, 9: True}
return
def __len__(self):
return len(self.sceneImageIndices)
def transformPlanes(self, transformation, planes):
planeOffsets = np.linalg.norm(planes, axis=-1, keepdims=True)
centers = planes
centers = np.concatenate([centers, np.ones((planes.shape[0], 1))], axis=-1)
newCenters = np.transpose(np.matmul(transformation, np.transpose(centers)))
newCenters = newCenters[:, :3] / newCenters[:, 3:4]
refPoints = planes - planes / np.maximum(planeOffsets, 1e-4)
refPoints = np.concatenate([refPoints, np.ones((planes.shape[0], 1))], axis=-1)
newRefPoints = np.transpose(np.matmul(transformation, np.transpose(refPoints)))
newRefPoints = newRefPoints[:, :3] / newRefPoints[:, 3:4]
planeNormals = newRefPoints - newCenters
planeNormals /= np.linalg.norm(planeNormals, axis=-1, keepdims=True)
planeOffsets = np.sum(newCenters * planeNormals, axis=-1, keepdims=True)
newPlanes = planeNormals * planeOffsets
return newPlanes
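    ## Usage sketch (illustrative; assumes `dataset` is a constructed
    ## PlaneDatasetSingle): planes are encoded as normal * offset, so a floor
    ## plane at z = 2.5 round-trips through the identity transform:
    ##   planes = np.array([[0., 0., 2.5]])
    ##   assert np.allclose(dataset.transformPlanes(np.eye(4), planes), planes)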
def __getitem__(self, index):
t = int(time.time() * 1000000)
np.random.seed(((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24))
if self.config.ANCHOR_TYPE == 'layout':
return self.getItemLayout(index)
if self.config.ANCHOR_TYPE == 'structure':
return self.getItemStructure(index)
while True:
if self.random:
index = np.random.randint(len(self.sceneImageIndices))
else:
index = index % len(self.sceneImageIndices)
pass
sceneIndex, imageIndex = self.sceneImageIndices[index]
scene = self.scenes[sceneIndex]
try:
image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
if len(planes) == 0:
index += 1
continue
except:
index += 1
continue
pass
if segmentation.max() < 0:
index += 1
continue
break
instance_masks = []
class_ids = []
parameters = []
if len(planes) > 0:
if 'joint' in self.config.ANCHOR_TYPE:
distances = np.linalg.norm(np.expand_dims(planes, 1) - self.config.ANCHOR_PLANES, axis=-1)
plane_anchors = distances.argmin(-1)
elif self.config.ANCHOR_TYPE == 'Nd':
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
                distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys, os
import corgi
import pyplasma as plasma
from configSetup import Configuration
import initialize as init
from visualize import plotNode
from visualize_amr import plotXmesh
from visualize import plotJ, plotE, plotDens
from visualize import saveVisz
from visualize import get_yee
import injector
from timer import Timer
# Generic function to fill the velocity mesh
#
# Maxwellian plasma with Brownian noise
# where delgam = kT/m_i c^2
#
def filler(xloc, uloc, ispcs, conf):
mux_noise = 0.0
delgam_noise = 0.0
brownian_noise = 0.0
x = xloc[0]
y = xloc[1]
z = xloc[2]
ux = uloc[0]
uy = uloc[1]
uz = uloc[2]
#print("uy={} uz={}".format(uy,uz))
#1d filler
if not( (np.abs(uy) < 0.01) and (np.abs(uz) < 0.01) ):
return 0.0
#electrons
if ispcs == 0:
        delgam = conf.delgam * np.abs(conf.mi / conf.me)
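    # A minimal sketch (assumed, not from the source) of how the drifting 1D
    # Maxwellian density would typically be evaluated from here, with mux and
    # delgam set per species from conf:
    #
    #   f = 1.0 / np.sqrt(2.0 * np.pi * delgam) * np.exp(
    #       -0.5 * (ux - mux) ** 2 / delgam)
    #   return f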
# -*- coding: utf-8 -*-
"""
last mod 5/22/19
"""
import numpy as np
import numba as nb
from math import floor as mathfloor
from config import lidar_files
from config import present_boxes_file
from config import grndstart, grndstep, grndlen, ground_planes_by_file
from config import anchorstart, anchorstep, anchorlen
from config import nlocaltiles, localgridlen
from config import grnd2checkgrid, grnd4localgrid
from config import anchorinlocalgrid_strided as anchorinlocalgrid
from config import anchorangles_strided as anchorangles
from config import anchornangles_strided as anchornangles
from ground2 import planes2Transforms, tilePoints
from calibs import calib_extrinsics, calib_map
anchorcossins = np.column_stack((np.cos(anchorangles), np.sin(anchorangles)))
anchorcenterpoints = (anchorinlocalgrid - localgridlen//2 -
anchorstart[:2])*anchorstep[:2]
anchorcenter2 = np.einsum(anchorcenterpoints, [0,1], anchorcossins[:,0], [2], [2,0,1])
anchorcenter2[:,:,0] -= np.outer(anchorcossins[:,1], anchorcenterpoints[:,1])
anchorcenter2[:,:,1] += np.outer(anchorcossins[:,1], anchorcenterpoints[:,0])
anchorcenterpoints = anchorcenter2
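# The einsum block above is a batched 2-D rotation (a readable restatement,
# not used at runtime): for each anchor angle theta with c = cos(theta),
# s = sin(theta), and anchor offset (x, y),
#   x' = c*x - s*y
#   y' = s*x + c*y
# so anchorcenterpoints[angle, anchor] is the anchor center expressed in that
# angle's rotated frame.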
#@nb.njit(nb.void(nb.f8[:,:], nb.f8[:], nb.f8, nb.b1[:,:,:]))
#def fillPositiveSample(pts, positionnoise, anglenoise, grid):
# for pt in pts
@nb.njit(nb.void(nb.f8[:,:], nb.i8[:], nb.i8, nb.i8, nb.f8[:,:], nb.b1[:,:,:,:]))
def fillLocalGrid(pts, tileidxs, tilex, tiley, groundT, grid):
grid[:] = False
for grnd4localidx in xrange(grnd4localgrid.shape[0]):
tilex2, tiley2 = grnd4localgrid[grnd4localidx]
tile = (tilex+tilex2)*grndlen[1] + tiley+tiley2
pts_idxstart, pts_idxend = tileidxs[tile:tile+2]
for ptsidx in xrange(pts_idxstart, pts_idxend):
pt = pts[ptsidx]
grndpt = np.dot(groundT[:3,:3], pt) + groundT[:3,3]
z = int(mathfloor(grndpt[2]/anchorstep[2])) - anchorstart[2]
if z < 0 or z >= anchorlen[2]: continue
for angle in xrange(anchornangles):
xf = anchorcossins[angle,0]*grndpt[0] + anchorcossins[angle,1]*grndpt[1]
x = int(mathfloor(xf/anchorstep[0])) + localgridlen[0]//2
yf = anchorcossins[angle,0]*grndpt[1] - anchorcossins[angle,1]*grndpt[0]
y = int(mathfloor(yf/anchorstep[1])) + localgridlen[1]//2
if x >= 0 and x < localgridlen[0] and y >= 0 and y < localgridlen[1]:
grid[angle,x,y,z] = True
@nb.njit(nb.b1(nb.f8,nb.f8,nb.f8,nb.f8,nb.f8,nb.f8,
nb.f8,nb.f8,nb.f8,nb.f8,nb.f8,nb.f8, nb.f8))
def rectOverlap(x1,y1,c1,s1,l1,w1, x2,y2,c2,s2,l2,w2, overlap_buffer):
x2in1 = (x2-x1)*c1 + (y2-y1)*s1
y2in1 = (y2-y1)*c1 - (x2-x1)*s1
x1in2 = (x1-x2)*c2 + (y1-y2)*s2
y1in2 = (y1-y2)*c2 - (x1-x2)*s2
cos = abs(c1*c2+s1*s2)
sin = abs(c1*s2-c2*s1)
return not (l1 + l2*cos + w2*sin - abs(x2in1) < overlap_buffer or
w1 + l2*sin + w2*cos - abs(y2in1) < overlap_buffer or
l2 + l1*cos + w1*sin - abs(x1in2) < overlap_buffer or
w2 + l1*sin + w1*cos - abs(y1in2) < overlap_buffer)
# return not (x2in1 + l2*cos + w2*sin + l1 < overlap_buffer or
# l1 - x2in1 + l2*cos + w2*sin < overlap_buffer or
# y2in1 + l2*sin + w2*cos + w1 < overlap_buffer or
# w1 - y2in1 + l2*sin + w2*cos < overlap_buffer or
# x1in2 + l1*cos + w1*sin + l2 < overlap_buffer or
# l2 - x1in2 + l1*cos + w1*sin < overlap_buffer or
# y1in2 + l1*sin + w1*cos + w2 < overlap_buffer or
# w2 - y1in2 + l1*sin + w1*cos < overlap_buffer)
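# Quick usage sketch (illustrative values; l and w are half-extents, per the
# separating-axis test above): two axis-aligned 2x1 boxes overlap when their
# centers are 1.0 apart along x, but not when 10.0 apart:
#   rectOverlap(0., 0., 1., 0., 1., .5, 1., 0., 1., 0., 1., .5, 0.)   # True
#   rectOverlap(0., 0., 1., 0., 1., .5, 10., 0., 1., 0., 1., .5, 0.)  # False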
@nb.njit(nb.b1[:,:,:,:]())
def prepLocalNms():
nanchors = anchorinlocalgrid.shape[0]
overlaps = np.zeros((anchornangles, anchornangles, nanchors, nanchors),
dtype=np.bool8)
# length in each direction
# set a little low to only catch close objs
obj_len = 2.
obj_wid = 1.
obj_hypot = np.hypot(obj_len, obj_wid)
overlap_buffer = .4
for angleidx1, angleidx2, anchoridx1, anchoridx2 in np.ndindex(
anchornangles, anchornangles, nanchors, nanchors):
if angleidx2 < angleidx1 or anchoridx2 < anchoridx1:
continue
x1, y1 = anchorcenterpoints[angleidx1, anchoridx1]
x2, y2 = anchorcenterpoints[angleidx2, anchoridx2]
overlap = False
centerdist = np.hypot(x1-x2, y1-y2)
if centerdist < obj_wid*2 - overlap_buffer:
overlap = True
elif centerdist > obj_hypot*2 - overlap_buffer:
overlap = False
else:
cos1, sin1 = anchorcossins[angleidx1]
cos2, sin2 = anchorcossins[angleidx2]
overlap = rectOverlap(x1,y1,cos1,sin1,obj_len,obj_wid,
x2,y2,cos2,sin2,obj_len,obj_wid, overlap_buffer)
if overlap:
overlaps[angleidx1, angleidx2, anchoridx1, anchoridx2] = True
overlaps[angleidx2, angleidx1, anchoridx2, anchoridx1] = True
return overlaps
@nb.njit(nb.void(nb.b1[:,:,:], nb.b1[:,:,:]))
def prepRough(grid, roughX):
xc, yc, zc = grid.shape
roughX[:] = False
for x,y,z in np.ndindex(xc,yc,zc):
roughX[x//3,y//3,z//3] |= grid[x,y,z]
@nb.njit(nb.b1(nb.b1[:,:,:], nb.b1[:,:,:],
nb.i8, nb.i8, nb.i8, nb.i8, nb.i8, nb.i8))
def splitGrid(grid, roughgrid, x1,y1,z1,x2,y2,z2):
#x1,x2,y1,y2,z1,z2 = split
largex1 = x1//3
smallx1 = largex1 if x1 == largex1*3 else largex1 + 1
smallx2 = x2//3
largex2 = smallx2 if x2 == smallx2*3 else smallx2 + 1
largey1 = y1//3
smally1 = largey1 if y1 == largey1*3 else largey1 + 1
smally2 = y2//3
largey2 = smally2 if y2 == smally2*3 else smally2 + 1
largez1 = z1//3
smallz1 = largez1 if z1 == largez1*3 else largez1 + 1
smallz2 = z2//3
largez2 = smallz2 if z2 == smallz2*3 else smallz2 + 1
if np.any(roughgrid[smallx1:smallx2, smally1:smally2, smallz1:smallz2]):
return True
if not np.any(roughgrid[largex1:largex2, largey1:largey2, largez1:largez2]):
return False
return np.any(grid[x1:x2, y1:y2, z1:z2])
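# splitGrid consults the 3x-downsampled roughgrid as a conservative filter
# before scanning the fine grid. Index sketch (illustrative): a query over
# fine cells x in [4, 8) uses rough range [2, 2) for the fully-covered
# quick-accept (empty here, so no shortcut) and rough range [1, 3) for the
# covering quick-reject, falling through to np.any on the fine grid otherwise.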
@nb.njit(nb.f8(nb.b1[:,:,:], nb.b1[:,:,:], nb.i8, nb.i8, nb.i8,
nb.i8[:,:,:], nb.f8[:,:], nb.i8))
def useBoostedTree2(grid, roughgrid, anchorx, anchory, direction,
btsplits, btleaves, ntrees):
score = 0.
for tree in range(ntrees):
splitidx = 0
for depth in range(3):
tsplit = btsplits[tree, splitidx]
if direction == 0:
x1 = anchorx + tsplit[0]
x2 = anchorx + tsplit[3]
y1 = anchory + tsplit[1]
y2 = anchory + tsplit[4]
else:
x1 = anchorx + 48 - tsplit[3] ### change when changing anchor!!!
x2 = anchorx + 48 - tsplit[0] ### change when changing anchor!!!
y1 = anchory + 32 - tsplit[4] ### change when changing anchor!!!
y2 = anchory + 32 - tsplit[1] ### change when changing anchor!!!
z1 = tsplit[2]
z2 = tsplit[5]
splitidx = splitidx*2+2
if splitGrid(grid, roughgrid, x1,y1,z1,x2,y2,z2):
splitidx -= 1
score += btleaves[tree, splitidx - 7]
if score < btleaves[tree, 8]:
score = -50.
break
return score
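# Tree-layout sketch (restating the index arithmetic above): each tree is a
# depth-3 binary tree stored flat, with children of node s at 2s+1 ("split
# region occupied") and 2s+2 ("split region empty"):
#   s = 0
#   s = s*2 + 2                # tentatively take the "empty" child
#   if splitGrid(...): s -= 1  # occupied -> take the 2s+1 child instead
# After 3 levels s lands in [7, 14], so s - 7 indexes the 8 leaf weights in
# btleaves[tree, :8], and btleaves[tree, 8] acts as an early-exit cull
# threshold (score forced to -50).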
"""
returns the samples with the top predictions for a single lidar sweep
"""
@nb.njit(nb.i8(nb.f8[:,:], nb.i8[:], nb.f8[:,:,:,:], nb.i8[:,:,:], nb.f8[:,:],
nb.f8[:,:], nb.b1[:,:,:,:], nb.i8))
def predictNegs(pts, tileidxs, groundTs, btsplits, btleaves,
pts2suppress, detections, detectioncount):
gridshape = (anchornangles, localgridlen[0], localgridlen[1], anchorlen[2])
grid = np.zeros(gridshape, dtype=np.bool8)
nanchors = len(anchorinlocalgrid)
ndetections = detections.shape[0]
ntrees = btsplits.shape[0]
pts2suppress_range = 2+localgridlen*anchorstep[:2]/2.
centerpoint_grid = np.zeros(2, dtype=np.float64)
roughgridshape = (localgridlen[0]//3+1, localgridlen[1]//3+1, anchorlen[2]//3+1)
roughgrid = np.zeros(roughgridshape, dtype=np.bool8)
for grnd2checkgrididx in range(grnd2checkgrid.shape[0]):
centerx, centery = grnd2checkgrid[grnd2checkgrididx]
# determine which suppress points are important
centerpoint_grid[0] = grndstep[0]*(grndstart[0]+centerx+.5)
centerpoint_grid[1] = grndstep[1]*(grndstart[1]+centery+.5)
pts2suppressidxs = np.abs(pts2suppress[:,0]-centerpoint_grid[0]) < pts2suppress_range[0]
pts2suppressidxs &= np.abs(pts2suppress[:,1]-centerpoint_grid[1]) < pts2suppress_range[1]
pts2suppress_local = pts2suppress[pts2suppressidxs].copy()
pts2suppress_local[:,:2] -= centerpoint_grid
npts2suppress = pts2suppress_local.shape[0]
groundT = groundTs[centerx, centery]
fillLocalGrid(pts, tileidxs, centerx, centery, groundT, grid)
for angle in range(anchornangles):
angcos, angsin = anchorcossins[angle]
thisgrid = grid[angle]
prepRough(thisgrid, roughgrid)
for anchoridx in range(nanchors):
anchorx, anchory = anchorinlocalgrid[anchoridx]
anchorcenterptx, anchorcenterpty = anchorcenterpoints[angle,anchoridx]
suppressed = False
for pt2suppressidx in range(npts2suppress):
ptx,pty,ptcos,ptsin = pts2suppress_local[pt2suppressidx]
if (np.hypot(ptx-anchorcenterptx, pty-anchorcenterpty) < 2. and
abs(ptcos*angsin - ptsin*angcos) < .8):
suppressed = True
suppressed |= ((anchorcenterptx+centerpoint_grid[0])*.866 - 1.3 <
abs(anchorcenterpty+centerpoint_grid[1]))
if suppressed: continue
score1 = useBoostedTree2(thisgrid, roughgrid, anchorx, anchory, 0,
btsplits, btleaves, ntrees)
score2 = useBoostedTree2(thisgrid, roughgrid, anchorx, anchory, 1,
btsplits, btleaves, ntrees)
if score1 > score2:
score = score1
direction = 0
else:
score = score2
direction = 1
if score > -30: # otherwise, consider culled
if detectioncount < ndetections:
detectionidx = detectioncount
else:
detectionidx = np.random.randint(detectioncount+1)
if detectionidx < ndetections:
sample = grid[angle, anchorx:anchorx+anchorlen[0],
anchory:anchory+anchorlen[1], :]
if direction:
sample = sample[::-1,::-1]
detections[detectionidx] = sample
detectioncount += 1
return detectioncount
def prepForPredicting(fileidx, objects_to_suppress):
data = np.fromfile(lidar_files.format(fileidx),
dtype=np.float32).reshape((-1,4))[:,:3]
calib_extrinsic = calib_extrinsics[calib_map[fileidx]].copy()
calib_extrinsic[2,3] += 1.65
data = data.dot(calib_extrinsic[:3,:3].T) + calib_extrinsic[:3,3]
# get ground
ground = np.load(ground_planes_by_file.format(fileidx))
pts, tileidxs = tilePoints(data, grndstart, grndstep, grndlen)
groundTs = planes2Transforms(ground)
# get suppressed objects
suppress_start, suppress_end = np.searchsorted(objects_to_suppress[:,0],
[fileidx, fileidx+1])
    pts2suppress = np.zeros((suppress_end-suppress_start, 4))
    pts2suppress[:,:2] = objects_to_suppress[suppress_start:suppress_end, 1:3]
    pts2suppress[:,2] = np.cos(objects_to_suppress[suppress_start:suppress_end,0])
    pts2suppress[:,3] = np.sin(objects_to_suppress[suppress_start:suppress_end,0])
return pts, tileidxs, pts2suppress, groundTs
if __name__ == '__main__':
from config import training_file_start, training_file_end
from time import time
starttime = time()
BT_load_file = '../dataApril19/BT29.npz'
#np.random.seed(200)
nnegatives = 7150
nfilesfornegatives = 60
BTstruct = np.load(BT_load_file)
btsplits = BTstruct['splits']
btleaves = BTstruct['leaves']
files2use = np.random.choice(np.arange(training_file_start, training_file_end),
nfilesfornegatives, replace=False)
objects_to_suppress = np.load(present_boxes_file)
anchoroverlaps = prepLocalNms()
globaldetections = np.zeros(
(nnegatives, anchorlen[0], anchorlen[1], anchorlen[2]), dtype=bool)
detectioncount = 0
for file_idx in files2use:
# load relevant data
data = np.fromfile(lidar_files.format(file_idx),
dtype=np.float32).reshape((-1,4))[:,:3]
calib_extrinsic = calib_extrinsics[calib_map[file_idx]].copy()
calib_extrinsic[2,3] += 1.65
data = data.dot(calib_extrinsic[:3,:3].T) + calib_extrinsic[:3,3]
# get ground
ground = np.load(ground_planes_by_file.format(file_idx))
pts, tileidxs = tilePoints(data, grndstart, grndstep, grndlen)
groundTs = planes2Transforms(ground)
# get suppressed objects
suppress_start, suppress_end = np.searchsorted(objects_to_suppress[:,0],
[file_idx, file_idx+1])
        pts2suppress = np.zeros((suppress_end-suppress_start, 4))
        pts2suppress[:,:2] = objects_to_suppress[suppress_start:suppress_end, 1:3]
        pts2suppress[:,2] = np.cos(objects_to_suppress[suppress_start:suppress_end,0])
        pts2suppress[:,3] = np.sin(objects_to_suppress[suppress_start:suppress_end,0])