import trimesh
import numpy as np
import sys
from matplotlib.cm import get_cmap as colormap
import os
from glob import glob
import cv2
import shutil
import json
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
sys.path.append('../')
from evaluate import Renderer
from vPlaneRecover.transforms import NYU40_COLORMAP
from vPlaneRecover.datasets.scannet import load_scannet_label_mapping, load_scannet_nyu40_mapping
from fit_plane.util import get_nyu_id2labl
NYU_ID2LABEL = get_nyu_id2labl()
CLASS_W_LONGSTRIKE = set(['table', 'desk', 'chair', 'window','bookshelf','shelves'])
NONE_PLANE_CLASS = set(['window', 'lamp', 'pillow', 'bag', 'curtain', 'shower curtain',
                        'toilet', 'mirror', 'person', 'clothes', 'sink'])
INVALID_ID = 16777216 // 100 - 1  # (255,255,255) encodes an invalid plane; see get_gtPlane_segmt.py in the fit_plane code
PLANE_AREA_THRES = 0.1
PLANE_EDGE_RATIO_THRES = 0.1
PLANE_AREA_RATIO_THRES = 0.5
PLANE_SEC_EDGE_THRES = 0.2
N_DEPTH_THRES = 0.8
DEPTH_ABSDIFF_THRES = 0.1
DEPTH_RELDIFF_THRES = 0.05
N_VERT_THRES = 120 # same as MergedPlaneAreaThreshold = 120 in get_gtPlane_segmt.py
DEPTH_SHIFT = 1000
width, height = 640, 480
zoom_x, zoom_y = width / 1296., height/ 968.
def get_planeColor_map(n_plane):
# get color
_cmap = np.array(NYU40_COLORMAP[1:])
if n_plane - 40 > 0:
cmap = (colormap('jet')(np.linspace(0, 1, n_plane - 40))[:, :3] * 255).astype(np.uint8)
cmap = cmap[np.random.permutation(n_plane - 40), :]
plane_color = np.concatenate([_cmap, cmap], axis=0)
else:
plane_color = _cmap
return plane_color
def color2plane(mesh):
    # recover the per-vertex plane id from the vertex colors
vert_colors = mesh.visual.vertex_colors.view(np.ndarray)
chan_0 = vert_colors[:, 2]
chan_1 = vert_colors[:, 1]
chan_2 = vert_colors[:, 0]
plane_id = (chan_2 * 256 ** 2 + chan_1 * 256 + chan_0) // 100 - 1 # there is no (0,0,0) color in fitting mesh
# mesh.vertex_attributes['plane_id'] = plane_id
return plane_id
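# Worked example of the color encoding: plane id 41 is stored as the value
# (41 + 1) * 100 = 4200, i.e. (R, G, B) = (0, 16, 104) since 16 * 256 + 104 = 4200;
# color2plane inverts this via (R * 256**2 + G * 256 + B) // 100 - 1 = 41.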
def filter_plane_area_only(mesh):
unique_id = np.unique(mesh.vertex_attributes['plane_ins'])
face_color = mesh.visual.face_colors
    # filter them by face area again; we cannot compute a bbox here because trimesh raises
    # "hull precision error: Initial simplex is flat" since every piece is flat
for i, cur_id in enumerate(unique_id):
if cur_id == INVALID_ID: continue
# use plane id select vertex
vert_mask = mesh.vertex_attributes['plane_ins'] == cur_id
trg_color = mesh.visual.vertex_colors[vert_mask][0]
face_mask = face_color == trg_color
sub_mesh = mesh.submesh(np.nonzero(face_mask.all(axis=1)))[0]
surface_area = sub_mesh.area_faces.sum()
if surface_area <= PLANE_AREA_THRES:
mesh.visual.vertex_colors[vert_mask] = np.array([255, 255, 255, 255]).astype(np.uint8)
return mesh
def filter_plane_instance(mesh, semseg, plane_param, n_plane):
unique_id = np.unique( mesh.vertex_attributes['plane_ins'])
face_color = mesh.visual.face_colors
verts = mesh.vertices
add_addtion = 0
for cur_id in unique_id:
if cur_id == INVALID_ID: continue
# use plane id select vertex
vert_mask = mesh.vertex_attributes['plane_ins'] == cur_id
param = plane_param[cur_id:cur_id+1]
# debug
# mesh.visual.vertex_colors[vert_mask] = np.array([255, 0,0,255])
# load semseg id
sem_id = np.unique(semseg[vert_mask])
        assert len(sem_id) >= 1  # every plane must cover at least one semantic label
        # use the face color to get the submesh
        # ensure the plane has at least N_VERT_THRES (=120) verts, as stated in the fitting code
        if vert_mask.sum() < N_VERT_THRES or NYU_ID2LABEL[sem_id[0]] in NONE_PLANE_CLASS:
mesh.visual.vertex_colors[vert_mask] = np.array([255, 255, 255, 255]).astype(np.uint8)
continue
trg_color = mesh.visual.vertex_colors[vert_mask][0]
face_mask = face_color == trg_color
sub_mesh = mesh.submesh(np.nonzero(face_mask.all(axis=1)))[0]
        # NOTE: some verts may not have a face, or may have a face whose color differs from the vert color.
        # We first set all verts to white, then restore the original color to the pieces that meet the requirements.
mesh.visual.vertex_colors[vert_mask] = np.array([255, 255, 255, 255]).astype(np.uint8)
# if sem_id == 5:
# mesh.visual.vertex_colors[vert_mask] = np.array([255, 0, 0, 255]).astype(np.uint8)
# mesh.show()
split_mesh = sub_mesh.split(only_watertight=False)
        # we cannot sort the meshes w.r.t. bbox first, because computing a bbox needs at least 4 verts,
        # and some split pieces may not have that many
verts_masks, surfaces, edge_ratios, sec_edges, area_ratios = [], [], [], [], []
for i, m in enumerate(split_mesh):
# https://stackoverflow.com/questions/16210738/implementation-of-numpy-in1d-for-2d-arrays
            vmask = np.in1d(verts.view(dtype='f8,f8,f8').reshape(verts.shape[0]),
                            m.vertices.view(dtype='f8,f8,f8').reshape(m.vertices.shape[0]))
if vmask.sum() < N_VERT_THRES: continue
            # for each split part
            bbox = m.bounding_box_oriented  # oriented bounding box, aligned with the object instead of the axes (tighter fit)
edges = bbox.primitive.extents
max_edge, sec_edge = edges[edges.argsort()[-2:][::-1]]
edge_ratio = sec_edge/max_edge # to remove single long thin surface
bbox_area = max_edge * sec_edge
surface_area = m.area_faces.sum()
area_ratio = surface_area / bbox_area # to remove L, M, T shape surface
# if surface_area > max_surface :
verts_masks.append(vmask)
surfaces.append(surface_area)
edge_ratios.append(edge_ratio)
sec_edges.append(sec_edge)
area_ratios.append(area_ratio)
        n_assigned = -1
        for mask, surface, edge_ratio, area_ratio, sec_edge in \
                zip(verts_masks, surfaces, edge_ratios, area_ratios, sec_edges):
            if surface >= PLANE_AREA_THRES and mask.sum() > N_VERT_THRES \
                    and sec_edge >= PLANE_SEC_EDGE_THRES and edge_ratio >= PLANE_EDGE_RATIO_THRES:
                if NYU_ID2LABEL[sem_id[0]] in CLASS_W_LONGSTRIKE:
                    if area_ratio >= PLANE_AREA_RATIO_THRES:
                        n_assigned += 1
                else:
                    n_assigned += 1
                # mesh.visual.vertex_colors[mask] = trg_color  # np.array([255, 0, 255, 255]).astype(np.uint8)
        # uncomment the block below to split a large plane into pieces so each piece stays connected
        # when one plane is split into 2+ parts, add a new param to the result
        # if n_assigned > 0:
# add_addtion += 1
# more_color = (n_plane + add_addtion) * 100
# new_color = np.array([more_color / (256 * 256), more_color / 256 % 256, more_color % 256, 255])
# plane_param = np.concatenate([plane_param, param])
# mesh.visual.vertex_colors[mask] = new_color
        # all parts that meet the standard are preserved with the original color
        if n_assigned >= 0:
            mesh.visual.vertex_colors[mask] = trg_color
        # if n_assigned == -1 and sem_id == 5:
# mesh.visual.vertex_colors[vert_mask] = np.array([255, 255, 255, 255]).astype(np.uint8)
# print(cur_id, sem_id, ":", surface, PLANE_AREA_THRES, '/', edge_ratio, PLANE_EDGE_RATIO_THRES, '/',
# area_ratio, PLANE_AREA_RATIO_THRES, '/', sec_edge, PLANE_SEC_EDGE_THRES)
#
# if sem_id == 5:
# mesh.show()
return mesh, plane_param
def load_ply(src, data_pth, scene_name, label2id, scanNet2nyu):
src_mesh_coarse = trimesh.load(src, process=False)
# ==============================
    # load the aggregated instance-label indices,
    # which give each instance id and all the over-segmented piece IDs belonging to that instance
# ==============================
    # each vertex in the gt mesh indexes a seg group
segIndices = \
json.load(open('{}/{}/{}_vh_clean_2.0.010000.segs.json'.format(data_pth, scene_name, scene_name), 'r'))[
'segIndices']
    # maps each segment to its instance label
segGroups = json.load(open('{}/{}/{}.aggregation.json'.format(data_pth, scene_name, scene_name), 'r'))['segGroups']
mapping = {ind: group['label'] for group in segGroups for ind in group['segments']}
# get per vertex instance ids (0 is unknown, [1,...] are objects)
n = len(segIndices)
    label_verts = np.zeros(n, dtype=np.int64)
for i in range(n):
if segIndices[i] in mapping:
label_verts[i] = scanNet2nyu[label2id[mapping[segIndices[i]]]]
assert len(src_mesh_coarse.vertices) == n
return src_mesh_coarse, label_verts
def main():
train_pth = '/data/ScanNet/ScanNet_raw_data/scannet/scannetv2_train.txt'
data_pth = '/data/ScanNet/ScanNet_raw_data/scannet/scans/'
frm_err_pth = '/data/ScanNet/ScanNet_raw_data/video_plane_fitting/N_depth_thres=0.8/frm_err_pth/'
if not os.path.isdir(frm_err_pth):
os.makedirs((frm_err_pth))
train_scenes = []
with open(train_pth) as vf:
for line in vf:
tmp = line.strip()
# if tmp[-2:] == '00':
train_scenes.append(tmp)
train_scenes.sort()
# train_scenes = train_scenes[:3]
# sum err, num_frame
total_absDiff = np.zeros([len(train_scenes), 2])
total_absRel = np.zeros([len(train_scenes),2])
all_absDiff, all_absRel = [], []
for k, src in enumerate(train_scenes):
print('process:', src)
        # read the original faces for depth rendering; the plane-fitting mesh has lots of holes between planes
        # because our plane-fitting algorithm removes a face if its 3 verts have different plane instance ids
        src_original_mesh = trimesh.load('{}/{}/{}_vh_clean_2.ply'.format(data_pth, src, src), process=False)
# ========== render the depth and compare with gt depth ============
# if inconsistent #frame > N, remove the scene, because either the plane fitting or the gt pose is inaccurate
# ===================================================================
# load the gt_depth
gt_depth_pth = '{}/{}/depth/*.png'.format(data_pth, src)
depth_list = sorted(glob(gt_depth_pth), key= lambda x: int(os.path.basename(x)[:-4]))
# load pose path
gt_pose_pth = '{}/{}/pose/'.format(data_pth, src)
# load intrinsic
with open(os.path.join(data_pth, src, '%s.txt' % src)) as info_f:
info = [line.rstrip().split(' = ') for line in info_f]
info = {key: value for key, value in info}
intrinsics = np.array([
[float(info['fx_color'])*zoom_x, 0., float(info['mx_color'])*zoom_x],
[0., float(info['fy_color'])*zoom_y, float(info['my_color'])*zoom_y],
[0., 0., 1.]])
# init render
renderer = Renderer()
        mesh_opengl = renderer.mesh_opengl(src_original_mesh)  # this mesh has the same faces as the original ScanNet mesh
        # compare gt depth with rendered depth
        # as we will use the rendered depth to generate the tsdf mesh, the mesh will be pruned naturally during that process
sum_absDiff, sum_absRel, n_frm = 0, 0, 0
scene_stat = {}
save_pth = frm_err_pth + '/' + src + '.npz'
if os.path.isfile(save_pth):
            data = np.load(save_pth)
import numpy as np
from agents.common import *
def generate_move_minimax(
board: np.ndarray, player: BoardPiece, saved_state: Optional[SavedState]
) -> Tuple[PlayerAction, Optional[SavedState]]:
"""
generate move function employing minimax algorithm with/without alpha-beta pruning
Parameters
-----------
board: np.ndarray
board reflecting current game state
player: BoardPiece
player for which the minimax algorithm is supposed to calculate an action
Return
-----------
action
action for player, generated by minimax
"""
depth = 5
"""change comment to use/not use alpha-beta pruning"""
#action, _ = minimax(board, depth, player, True)
action, _ = minimax_alpha_beta_pruning(board, depth, player, True, -np.inf, +np.inf)
return action, saved_state
def minimax_alpha_beta_pruning(
board: np.ndarray, depth, player: BoardPiece, maximizing: bool, alpha, beta
):
"""
Minimax algorithm with alpha-beta pruning
Parameters
-----------
board: np.ndarray
board reflecting current game state
depth: int
depth explored by the minimax algorithm
player: BoardPiece
player for which the minimax algorithm is supposed to calculate an action
maximizing : Bool
if True, result is maximized for the given player
alpha
alpha-value for pruning, if MAXIMIZING = True, should be initialized to -np.inf
beta
beta-value for pruning, if MAXIMIZING = True, should be initialized to +np.inf
Return
-----------
Tuple [action (best column), heuristic_value]
"""
opponent = PLAYER2 if player == PLAYER1 else PLAYER1
"""Edge Case: (terminal node) AI or Player is winning with the next piece or the board is full (draw)"""
terminal_state = check_end_state(board, player)
    if terminal_state == GameState.IS_WIN and maximizing:  # maximizing player is winning
        # print("winning")
        return -1, np.inf
    elif terminal_state == GameState.IS_WIN and not maximizing:  # maximizing player loses because minimizing player wins
        # print("losing")
        return -1, -np.inf
    elif terminal_state == GameState.IS_DRAW:  # game over because of a draw
        return -1, 0  # a draw scores 0; returning None would break the score comparisons below
if depth == 0: # case where depth = 0
value_for_depth0 = evaluate_board(board, player)
return -1, value_for_depth0
valid_columns = get_valid_columns(board)
    best_column = np.random.choice(valid_columns)  # scalar fall-back column, not a length-1 array
""" recursive minimizing/maximizing case"""
if maximizing: # maximizing for player
value = -np.inf
for column in valid_columns:
c_board = apply_player_action(board, column, player, True) ### player
            _, new_score = minimax_alpha_beta_pruning(c_board, depth - 1, opponent, False, alpha,
                                                      beta)  # maximizing is False on the next level, since it is the opponent's turn
if new_score > value:
value = new_score
best_column = column
alpha = max(alpha, new_score)
if alpha >= beta:
break ### beta cut-off
return best_column, value
else: # minimizing for opponent
value = np.inf
for column in valid_columns:
c_board = apply_player_action(board, column, opponent, True) ### opponent is minimizing player
            _, new_score = minimax_alpha_beta_pruning(c_board, depth - 1, player, True, alpha,
                                                      beta)  # maximizing is True on the next level, since it is the player's turn
if new_score < value:
value = new_score
best_column = column
beta = min(beta, new_score)
if alpha >= beta:
break ### alpha cut off
return best_column, value
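# A minimal, self-contained sketch of the same alpha-beta recursion on a toy game
# tree (a hypothetical illustration, independent of the Connect-4 helpers above):
# internal nodes are lists of children, leaves are plain scores.
def _alphabeta_demo(node, depth, alpha, beta, maximizing):
    if depth == 0 or not isinstance(node, list):
        return node  # leaf: return its static score
    if maximizing:
        value = -np.inf
        for child in node:
            value = max(value, _alphabeta_demo(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # beta cut-off: the minimizer will never allow this branch
        return value
    else:
        value = np.inf
        for child in node:
            value = min(value, _alphabeta_demo(child, depth - 1, alpha, beta, True))
            beta = min(beta, value)
            if alpha >= beta:
                break  # alpha cut-off
        return value
# _alphabeta_demo([[3, 5], [2, 9]], 2, -np.inf, np.inf, True) returns 3; the leaf 9
# is pruned because the second subtree's first child (2) already falls below alpha.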
def minimax(
board: np.ndarray, depth: int, player: BoardPiece, MAXIMIZING: bool
):
"""
Minimax algorithm
Parameters
-----------
board: np.ndarray
board reflecting current game state
depth: int
depth explored by the minimax algorithm
player: BoardPiece
player for which the minimax algorithm is supposed to calculate an action
MAXIMIZING : Bool
if True, result is maximized for the given player
Return
-----------
Tuple [action (best column), heuristic_value]
"""
opponent = PLAYER2 if player == PLAYER1 else PLAYER1
"""Edge Case: (terminal node) AI or Player is winning with the next piece or the board is full (draw)"""
terminal_state = check_end_state(board, player)
    if terminal_state == GameState.IS_WIN and MAXIMIZING:  # maximizing player is winning
        # print("winning")
        return -1, np.inf
    elif terminal_state == GameState.IS_WIN and not MAXIMIZING:  # maximizing player loses because minimizing player wins
        # print("losing")
        return -1, -np.inf
    elif terminal_state == GameState.IS_DRAW:  # game over because of a draw
        return -1, 0  # a draw scores 0; returning None would break the score comparisons below
if depth == 0: # case where depth = 0
value_for_depth0 = evaluate_board(board, player)
return -1, value_for_depth0
valid_columns = get_valid_columns(board)
    best_column = np.random.choice(valid_columns)  # scalar fall-back column, not a length-1 array
""" recursive minimizing/maximizing case"""
if MAXIMIZING: # maximizing for player
# print("maximizing works")
value = -np.inf
for column in valid_columns:
c_board = apply_player_action(board, column, player, True) ### player
            _, new_score = minimax(c_board, depth - 1, opponent,
                                   False)  # maximizing is False on the next level, since it is the opponent's turn
if new_score > value:
value = new_score
best_column = column
# print("after column", column, "best column is", best_column)
return best_column, value
else: # minimizing for opponent
# print("minimizing works")
value = np.inf
for column in valid_columns:
c_board = apply_player_action(board, column, opponent, True) ### opponent is minimizing player
            _, new_score = minimax(c_board, depth - 1, player,
                                   True)  # maximizing is True on the next level, since it is the player's turn
if new_score < value:
value = new_score
best_column = column
# print("after column", column, "best column is", best_column)
return best_column, value
def get_valid_columns(board: np.ndarray) -> np.ndarray:
"""
Function returning list of all columns with possible valid moves for current board
Parameters
-----------
board: np.ndarray
board reflecting current game state
Return
-----------
valid_columns_array:
array containing indices of all valid columns """
    valid_columns_array = []  # initialize empty list
    for m, col in enumerate(board.T):
        if np.any(col == 0):  # column m still has at least one empty slot
            valid_columns_array.append(m)
    return np.array(valid_columns_array)
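# For example (a sketch, assuming the standard empty 6x7 Connect-4 board):
#   get_valid_columns(np.zeros((6, 7)))  ->  array([0, 1, 2, 3, 4, 5, 6])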
def evaluate_window(window: np.ndarray, player: BoardPiece) -> int:
"""
evaluate a window (fraction of the board) passed into the function by func: evaluate_board
in terms of player pieces, opponent pieces and empty slots
Parameters
-----------
window:
array of shape (4,)
player: BoardPiece
Player for whom the window (i.e. the board) is evaluated
Return
-----------
window_score: int
score for the respective window
"""
window_score = 0
opponent = PLAYER2 if player == PLAYER1 else PLAYER1
"""Raise score if Player has 1 or more pieces in a window"""
if sum(window == player) == 3 and sum(window == NO_PLAYER) == 1:
window_score += 70
if sum(window == player) == 2 and sum(window == NO_PLAYER) == 2:
window_score += 5
if sum(window == player) == 1 and sum(window == NO_PLAYER) == 3:
window_score += 2
"""penalty if opponent hast 1 or more pieces in a window with empty space next to it"""
if sum(window == opponent) == 3 and sum(window == NO_PLAYER) == 1:
window_score -= 70
if sum(window == opponent) == 2 and sum(window == NO_PLAYER) == 2:
window_score -= 5
if sum(window == opponent) == 1 and sum(window == NO_PLAYER) == 3:
window_score -= 2
return window_score
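# Worked example (PLAYER1, PLAYER2 and NO_PLAYER come from agents.common): for
#   window = np.array([PLAYER1, PLAYER1, PLAYER1, NO_PLAYER])
# evaluate_window(window, PLAYER1) returns +70 (three own pieces plus one empty slot),
# while evaluate_window(window, PLAYER2) returns -70 for the same window.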
def evaluate_board(
board: np.ndarray, player: BoardPiece
) -> int:
"""
return a score reflecting the state of the board for the given player
Parameters
-----------
board: np.ndarray
board reflecting current game state
player: BoardPiece
        Player for whom the board is evaluated
Return
-----------
    board_score: int
        score for the whole board
"""
WINDOW_LENGTH = 4 # Window length to be evaluated (=4 for connect 4)
slide = WINDOW_LENGTH - 1 # reducing parameter based on window length when iterating over the board
board_score = 0 # initialize board score to 0
n_rows, m_columns = board.shape # initialize board shape
""" evaluate horizontally """
for r, row in enumerate(board):
for c in range(m_columns - slide):
window = row[c:c + WINDOW_LENGTH]
board_score += evaluate_window(window, player)
""" evaluate horizontally """
tboard = | np.transpose(board) | numpy.transpose |
import numpy as np
import matplotlib.pyplot as plt
def line_intersect(p, q):
    # intersect two lines given (assumption) as ((slope,), (intercept,)) pairs:
    # y = a*x + b  =>  a*x - y = -b, so solve P @ [x, y]^T = c
    P = np.matrix([[p[0][0], -1], [q[0][0], -1]])
    c = -np.matrix([[p[1][0]], [q[1][0]]])
import argparse
import numpy as np
from os import path
import struct
from internal import db_handling
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--sift_feature_dir', required=True)
parser.add_argument('--query_txt_file', required=True)
parser.add_argument('--database_file', required=True)
args = parser.parse_args()
return args
def main():
args = parse_args()
db = db_handling.COLMAPDatabase.connect(args.database_file)
db.create_tables()
with open(args.query_txt_file) as f:
for line in f:
name, _, h, w, fx, fy, cx, cy = line.split(' ')
params = np.array([float(fx), float(fy), float(cx), float(cy)])
camera_id = db.add_camera(1, int(h), int(w), params)
image_id = db.add_image(path.join('images', name), camera_id)
featurefile = path.join(args.sift_feature_dir,
path.splitext(name)[0] + '.sift')
with open(featurefile, 'rb') as f:
data = f.read()
header = struct.unpack_from('iiiii', data, 0)
_, _, num_points, num_entries, desc_size = header
assert num_entries == 5 and desc_size == 128
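                # Layout assumption (VisualSFM-style binary .sift file): a 5-int32
                # header, then num_points keypoint records of five float32 values each
                # (location, color, scale, orientation), then num_points descriptors
                # of 128 uint8 values each.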
offset = 20
keypoints = np.zeros((num_points, 2))
for i in range(num_points):
point = struct.unpack_from('fffff', data, offset)
offset += 20
keypoints[i, :] = np.array((point[1], point[0]))
descriptors = np.zeros((num_points, desc_size))
for i in range(num_points):
descriptor = struct.unpack_from('128B', data, offset)
offset += desc_size
                descriptors[i, :] = np.asarray(descriptor)
import numpy as np
import pandas as pd
import os
import sys
import inspect
import unittest
from joblib import load
currentdir = os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from src.lr.text_processing.util import pre_process_nli_df
from src.lr.text_processing.util import get_corpus
from src.lr.text_processing.transformations.wordnet import get_noun_syn_dict
from src.lr.text_processing.transformations.wordnet import p_h_transformation_syn_dict
from src.lr.text_processing.transformations.wordnet import parallelize
from src.lr.training.util import filter_df_by_label
# data_path = parentdir + "/src/data/toy/train.csv"
# assert os.path.exists(data_path)
class SynTrans(unittest.TestCase):
@classmethod
def setUp(cls):
folder = "toy"
cls.n_cores = 2
train_path = parentdir + "/src/data/{}/train.csv".format(folder)
dev_path = parentdir + "/src/data/{}/dev.csv".format(folder)
veto_path = parentdir + "/src/data/{}/syn_veto.csv".format(folder)
cls.syn_path = parentdir + "/src/data/{}/syn_noun.csv".format(folder)
cls.train_path_mod = parentdir + "/src/data/{}/train_p_h_syn_noun.csv".format(folder)
cls.dev_path_mod = parentdir + "/src/data/{}/dev_p_h_syn_noun.csv".format(folder)
train = pd.read_csv(train_path)
dev = pd.read_csv(dev_path)
train = filter_df_by_label(train.dropna()).reset_index(drop=True)
dev = filter_df_by_label(dev.dropna()).reset_index(drop=True)
pre_process_nli_df(train)
pre_process_nli_df(dev)
cls.train = train
cls.dev = dev
cls.veto = pd.read_csv(veto_path).veto.values
@classmethod
def tearDown(cls):
for path in [cls.train_path_mod, cls.dev_path_mod, cls.syn_path]:
if os.path.exists(path):
os.remove(path)
def test_syn_transformation(self):
# get syn dict
syn_dict = get_noun_syn_dict(df=self.train, n_cores=self.n_cores, veto=self.veto)
# removing possible verbs
syn_dict = {k: syn_dict[k] for k in syn_dict if k[-3:] != "ing"}
# saving to a dataframe
key = sorted(syn_dict.keys())
value = [syn_dict[k] for k in key]
syn_df = pd.DataFrame({"key": key,
"value": value})
train_t = p_h_transformation_syn_dict(df=self.train, syn_dict=syn_dict)
dev_t = p_h_transformation_syn_dict(df=self.dev, syn_dict=syn_dict)
self.assertTrue(not np.any(self.train.premise == train_t.premise))
self.assertTrue(not np.any(self.train.hypothesis == train_t.hypothesis))
self.assertTrue(np.all(self.train.label == train_t.label))
self.assertTrue(not np.any(self.dev.premise == dev_t.premise))
        self.assertTrue(not np.any(self.dev.hypothesis == dev_t.hypothesis))
#!/usr/bin/env python3
"""
Created on Wed Feb 12 10:44:59 2020
@author: <NAME>
Skeleton modified from
https://www.tensorflow.org/tutorials/customization/custom_training
https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough
Training of an RBM parametrization of the unitary matrix that diagonalises the extended HAMILTONIAN of harmonically driven quantum systems
==================== IMPORTANT NOTE ========================
============================================================
"""
############### IMPORT WHAT WE NEED FROM THE OS/PYTHON DISTRIBUTION #####################
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
get_ipython().run_line_magic('tensorflow_version', '2.x')
except Exception:
pass
import tensorflow as tf
import numpy as np
import math as m
#import matplotlib as mpl
import matplotlib.pyplot as plt
############### IMPORT WHAT WE NEED FROM THIS PROJECT #####################
import model as Model
import matrixmanipulation as mp
import NN_model as NN_Model
import Training as training
############### FULL RUN ################################
# 1. CALCULATE THE EXACT FLOQUET STATES
# 2. CALCULATE THE RBM REPRESENTATION
H = Model.Hamiltonian()
N = 32
CentralFloquetE = np.zeros([N,2],dtype=np.float64)
CentralFloquetEVec = np.zeros([N,H.dim,2],dtype=np.complex128)
CentralFloquetE_RBM = np.zeros([N,2],dtype=np.float64)
CentralFloquetEVec_RBM = np.zeros([N,H.dim,2],dtype=np.complex128)
loss_list_RWA = tf.Variable([],dtype=tf.float64)
N_training = 4
N_tr_ph = 2
N_tr_rho = 2
file = open('RBM_TrainingFloquet.dat','wb')
optimizer = tf.keras.optimizers.Adam(learning_rate=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
#optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
#%%
file_psi = open('absPsi_comparison_Floquet_3D.dat','w')
for i in [4, 16, 31]:  # alternatives tried: [16, 18], every other index up to 32, or range(N)
delta = 1.7
Omega = 10.0*i/N
phase = np.arctan(1.0)/4.1341341
print(i,delta,Omega,phase)
N_training = 256
H = Model.Hamiltonian(delta,Omega,phase)
###### FITTING A WAVEFUNCTION:
###### loss_Floquet,loss_Floquet_phase : fits the Floquet wavefunction
###### loss_RWA,loss_RWA_phase : fits the RWA wavefunction
trained_parameters,loss_history = training.RMB_training_Psi(N_training,H,
Model.loss_Floquet,
Model.loss_Floquet_Phase)
#trained_parameters,loss_history = training.RMB_training_Psi(N_training,H,
# Model.loss_RWA,
# Model.loss_RWA_Phase)
model = Model.RBM_Model(delta,Omega,phase,trained_parameters)
UF = Model.Unitary_Matrix(model)
U_ = tf.transpose(tf.math.conj(UF))@model.H_TLS@UF
if (UF.shape[1] > model.S):
index_ = int(model.S*((UF.shape[1]/model.S -1))/2)
else:
index_ = 0
CentralFloquetEVec_RBM[i,:,0] = UF[:,index_ ].numpy()
CentralFloquetEVec_RBM[i,:,1] = UF[:,index_+1].numpy()
CentralFloquetE_RBM[i,0] = tf.math.real(U_[index_ , index_ ]).numpy()
CentralFloquetE_RBM[i,1] = tf.math.real(U_[index_ + 1 , index_ + 1]).numpy()
absPsi_comparison = np.zeros([UF.shape[0],11],dtype=np.double)
    absPsi_comparison[:, 0] = np.linspace(0, UF.shape[0] - 1, UF.shape[0], dtype=int)
    absPsi_comparison[:, 1] = np.mod(np.linspace(0, UF.shape[0] - 1, UF.shape[0], dtype=int), 2) - 0.5
absPsi_comparison[:,2] = absPsi_comparison[:,0]//2 - 16
absPsi_comparison[:,3:5] = Model.Rect2Pol(UF[:,model.N_Floquet_UF*model.S:model.N_Floquet_UF*model.S+1])
absPsi_comparison[:,5:7] = Model.Rect2Pol(UF[:,model.N_Floquet_UF*model.S+1:model.N_Floquet_UF*model.S+2])
    aux = np.zeros([66, 1], dtype=complex)
aux[:,0] = model.U_Floquet[:,0]
absPsi_comparison[:,7:9] = Model.Rect2Pol(aux)
aux[:,0] = model.U_Floquet[:,1]
absPsi_comparison[:,9:11] = Model.Rect2Pol(aux)
np.savetxt(file_psi,absPsi_comparison)
file_psi.write("\n\n")
np.savetxt(file_psi,loss_history)
file_psi.write("\n\n")
plt.plot(loss_history)
plt.show()
plt.plot(np.abs(UF[:,model.N_Floquet_UF*model.S:model.N_Floquet_UF*model.S+2]))
plt.show()
plt.plot(np.abs(model.U_Floquet[:,:]))
plt.show()
plt.imshow(np.abs(U_.numpy()))
plt.show()
file_psi.close()
#%%
for i in [16]:  # alternatives tried: every other index up to 32, or range(N)
delta = 1.7
Omega = 10.0*i/N
phase = np.arctan(1.0)/4.1341341
N_training= 12000
trained_parameters,loss_history = training.RBM_training_FloquetStates(N_training,H)
model = Model.RBM_Model(delta,Omega,phase,trained_parameters)
UF = Model.Unitary_Matrix(model)
U_ = tf.transpose(tf.math.conj(UF))@model.H_TLS@UF
if (UF.shape[1] > model.S):
index_ = int(model.S*((UF.shape[1]/model.S -1))/2)
else:
index_ = 0
CentralFloquetEVec_RBM[i,:,0] = UF[:,index_ ].numpy()
CentralFloquetEVec_RBM[i,:,1] = UF[:,index_+1].numpy()
CentralFloquetE_RBM[i,0] = tf.math.real(U_[index_ , index_ ]).numpy()
CentralFloquetE_RBM[i,1] = tf.math.real(U_[index_ + 1 , index_ + 1]).numpy()
plt.plot(np.abs(UF[:,model.N_Floquet_UF*model.S:model.N_Floquet_UF*model.S+2]))
plt.show()
plt.plot(np.abs(model.U_Floquet[:,:]))
plt.show()
plt.imshow(np.abs(U_.numpy()))
plt.show()
#%%
########## TRAINING AGAINST THE RWA ###########################
########## TO OBTAIN AN INITIAL SET RBM PARAMETERS ################
########## THIS IS EQUIVALENT TO RECONSTRUCT A SET OF WAVE FUNCTIONS ######
for i in [14, 16, 18]:  # alternatives tried: every other index up to 32, or range(N)
delta = 1.7
Omega = 10.0*i/N
phase = np.arctan(1.0)/4.1341341
print(i,delta,Omega,phase)
N_training = 4
H = Model.Hamiltonian(delta,Omega,phase)
model = Model.RBM_Model(delta,Omega,phase)
CentralFloquetEVec[i,:,0] = model.U_Floquet[:,0].numpy()
CentralFloquetEVec[i,:,1] = model.U_Floquet[:,1].numpy()
CentralFloquetE[i,0] = tf.math.real(model.E_Floquet[0]).numpy()
CentralFloquetE[i,1] = tf.math.real(model.E_Floquet[1]).numpy()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
    for j in range(N_training):  # alternatively a subset such as [0, 1, 2, 3, 5]
# Fit the norm
for i_ in range(N_tr_rho):
#loss_value, grads = Model.grad_fun(model,Model.loss_Psi_rho)
loss_value, grads = Model.grad_fun(model,Model.loss_RWA)
#loss_value, grads = Model.grad_fun(model,Model.loss_Floquet)
loss_list_RWA = tf.concat([loss_list_RWA,[loss_value.numpy()]],axis=0)
#optimizer.apply_gradients(zip(grads, model.trainable_variables))
optimizer.apply_gradients(zip(grads[0:3], model.trainable_variables[0:3]))
# Fit the phase
for i_ in range(N_tr_ph):
#loss_value, grads = Model.grad_Phase(model,Model.loss_Psi_Phase)
loss_value, grads = Model.grad_Phase(model,Model.loss_RWA_Phase)
#loss_value, grads = Model.grad_Phase(model,Model.loss_Floquet_Phase)
loss_list_RWA = tf.concat([loss_list_RWA,[loss_value.numpy()]],axis=0)
optimizer.apply_gradients(zip(grads, model.trainable_variables[3:6]))
UF = Model.Unitary_Matrix(model)
#print("Hamiltonian in the dressed basis (RBM parametrisation): ")
U_ = tf.transpose(tf.math.conj(UF))@model.H_TLS@UF
#print(U_)
if (UF.shape[1] > model.S):
index_ = int(model.S*((UF.shape[1]/model.S -1))/2)
else:
index_ = 0
CentralFloquetEVec_RBM[i,:,0] = UF[:,index_ ].numpy()
CentralFloquetEVec_RBM[i,:,1] = UF[:,index_+1].numpy()
CentralFloquetE_RBM[i,0] = tf.math.real(U_[index_ , index_ ]).numpy()
CentralFloquetE_RBM[i,1] = tf.math.real(U_[index_ + 1 , index_ + 1]).numpy()
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
####### FINDING THE FLOQUET OPERATOR TRAINING A RBM ##############
####################################################################
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
#optimizer = tf.keras.optimizers.SGD(learning_rate=0.00001)
loss_list = tf.Variable([],dtype=tf.float64)
CentralFloquetE_RBM2 = np.zeros([N,2], dtype = np.float64)
CentralFloquetEVec_RBM2 = np.zeros([N, H.dim, 2], dtype=np.complex128)
import numpy as np
import scipy.special as ss
import scipy.signal as ss2
import scipy
from numpy import abs, sin, cos, real, exp, pi, sqrt
def psi_s(z, x, beta):
"""
2D longitudinal potential
Eq. (23) from Ref[1] with no constant factor (e*beta**2/2/rho**2).
Ref[1]: <NAME> and <NAME>, PRAB 23, 014402 (2020).
Note that 'x' here corresponds to 'chi = x / rho' in the paper.
"""
#try:
out = (cos(2 * alpha(z, x, beta)) - 1 / (1+x)) / (
kappa(z, x, beta) - beta * (1+x) * sin(2*alpha(z, x, beta)))
#except ZeroDivisionError:
# out = 0
# print(f"Oops! ZeroDivisionError at (z,x)= ({z:5.2f},{x:5.2f}). Returning 0.")
return np.nan_to_num(out)
def psi_x_where_x_equals_zero(z, dx, beta):
"""
Evaluate psi_x close to x = 0
This is a rough approximation of the singularity across x = 0
"""
return (psi_x(z, -dx/2, beta) + psi_x(z, dx/2, beta))/2
@np.vectorize
def ss_ellipf(phi, m):
y = ss.ellipkinc(phi, m)
# y = np.float(y)
return y
@np.vectorize
def ss_ellipe(phi, m):
y = ss.ellipeinc(phi, m)
# y = np.float(y)
return y
def psi_x(z, x, beta):
"""
Eq.(24) from Ref[1] with argument zeta=0 and no constant factor e*beta**2/2/rho**2.
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
# z = np.float(z)
# x = np.float(x)
kap = kappa(z, x, beta)
alp = alpha(z, x, beta)
arg2 = -4 * (1+x) / x**2
try:
T1 = (1/abs(x)/(1 + x) * ((2 + 2*x + x**2) * ss.ellipkinc(alp, arg2)- x**2 * ss.ellipeinc(alp, arg2)))
        D = kap**2 - beta**2 * (1 + x)**2 * sin(2*alp)
import numpy as np
import os
defargs = { 'VERBOSE':'TRUE',
'LOGFILE':'snap.logfile',
'TILECOSTTHRESH':500,
'MINREGIONSIZE':100,
'TILEEDGEWEIGHT':2.5,
'SCNDRYARCFLOWMAX':8}
def Snaphu(C,**kwargs):
#get the file names
outfile = ''
while outfile == '' or os.path.isfile(outfile):
num = np.random.randint(1000000)
outfile = '{:0d}.out'.format(num)
infile = '{:0d}.in'.format(num)
cfgfile = '{:0d}.cfg'.format(num)
#save the config file
f = open(cfgfile,'w')
f.write('INFILE {:s}\n'.format(infile))
f.write('LINELENGTH {:d}\n'.format(C.shape[1]))
f.write('OUTFILE {:s}\n'.format(outfile))
keys = list(defargs.keys())
for k in keys:
v = kwargs.get(k,defargs[k])
if isinstance(v,str):
o = k + ' {:s}\n'.format(v)
        elif isinstance(v, (int, np.integer)):
            o = k + ' {:d}\n'.format(v)
        elif isinstance(v, (float, np.floating)):
            o = k + ' {:f}\n'.format(v)
f.write(o)
f.close()
#save the input data
f = open(infile,'wb')
C.flatten().astype('complex64').tofile(f)
f.close()
#run snaphu
os.system('snaphu -f {:s}'.format(cfgfile))
#remove input file
os.system('rm -v {:s}'.format(infile))
#read the output file
f = open(outfile,'rb')
c = np.fromfile(f,dtype='float32',count=C.size*2).reshape((C.shape[0]*2,C.shape[1]))
f.close()
#create output arrays of amplitude and phase
ia = np.arange(C.shape[0])*2
    ip = np.arange(C.shape[0])*2 + 1  # phase rows interleave amplitude rows (assuming snaphu's default line-interleaved output)
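# Usage sketch (an assumption for illustration: requires the `snaphu` binary on PATH;
# the interferogram below is synthetic, not from real data):
#   C = np.exp(1j * np.random.uniform(-np.pi, np.pi, (128, 128))).astype('complex64')
#   result = Snaphu(C, TILECOSTTHRESH=500)  # runs snaphu on the complex interferogram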
import numpy as np
import pandas as pd
import joblib
import tensorflow as tf
import sys
import functools
import os
import tensorflow.keras.backend as K
from matplotlib import pyplot as plt
# from IPython.display import clear_output
from scipy.stats import gaussian_kde, binned_statistic as binstat
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import ShuffleSplit, GroupShuffleSplit
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, median_absolute_error
from tensorflow.keras.losses import Loss
from scipy.spatial.distance import jensenshannon as js
class HuberLoss(Loss):
"""
Custom TensorFlow Loss subclass implementing the Huber loss.
"""
def __init__(self, threshold: float = 1):
"""
:param threshold: float
The Huber threshold between L1 and L2 losses.
"""
super().__init__()
self.threshold = threshold
def call(self, y_true, y_pred):
error = y_true - y_pred
is_small_error = tf.abs(error) <= self.threshold
small_error_loss = tf.square(error) / 2
big_error_loss = self.threshold * (tf.abs(error) - (0.5 * self.threshold))
return tf.where(is_small_error, small_error_loss, big_error_loss)
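# A minimal sanity-check sketch (not part of the original pipeline) showing that
# HuberLoss follows the quadratic branch for small errors and the linear branch
# for large ones; `tf` is already imported above.
def _huber_loss_demo():
    loss = HuberLoss(threshold=1.0)
    y_true = tf.constant([[0.0], [0.0]])
    y_pred = tf.constant([[0.5], [3.0]])
    # elementwise: 0.5**2 / 2 = 0.125 (quadratic), 1.0 * (3.0 - 0.5) = 2.5 (linear)
    print(loss.call(y_true, y_pred).numpy())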
def root_mean_squared_error(y, y_pred, sample_weight=None):
"""
Compute the root mean squared error metric.
"""
value = mean_squared_error(y, y_pred, sample_weight=sample_weight)
return np.sqrt(value)
def process_input_parameters(pars, min_folds_cv=5):
"""
Check the consistency of the input parameters and make modifications if necessary.
:param pars: argparse.Namespace
An argparse namespace object containing the input parameters.
:param min_folds_cv: int
The minimum number of folds required for K-fold cross-validation.
:return: pars, argparse.Namespace
The processed version of the input namespace object.
"""
if len(pars.lcdir) > 1:
assert len(pars.wavebands) == len(pars.lcdir), "The number of items in lcdir must either be 1 or match " \
"the number of items in wavebands."
assert len(pars.wavebands) == len(pars.lcfile_suffices), \
"The number of items in wavebands and lcfile_suffices must match."
if not os.path.isdir(os.path.join(pars.rootdir, pars.outdir)):
os.mkdir(os.path.join(pars.rootdir, pars.outdir))
pars.hparam_grid = np.array(pars.hpars)
# Check if only the CPU is to be used:
if pars.cpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Join the list elements of pars.subset into a long string:
if pars.subset:
pars.subset = ' '.join(pars.subset)
# Check the number of meta input features:
if pars.meta_input is None:
pars.n_meta = 0
else:
pars.n_meta = len(pars.meta_input)
if pars.nn_type == 'cnn':
pars.n_channels = len(pars.wavebands)
else:
pars.n_channels = 2 * len(pars.wavebands)
if pars.weighing_by_density:
print("Density weighing is ON with cutoff {}".format(pars.weighing_by_density))
else:
print("Density weighing is OFF.")
print("Number of input channels: {}".format(pars.n_channels))
print("Number of meta features: {}".format(pars.n_meta))
if pars.train:
pars.predict = False # We want to train a regression model.
if pars.pick_fold is not None:
for ii in pars.pick_fold:
print(type(ii))
assert isinstance(ii, int) and 0 < ii <= pars.k_fold, \
"pick_fold must be > 0 AND <= k_fold integer"
assert pars.k_fold >= min_folds_cv, \
"pick_fold requires k_fold >= {}".format(min_folds_cv)
pars.refit = False
if not pars.cross_validate:
assert len(pars.hparam_grid) == 1, "Cannot do grid-search of hyper-parameters if cross_validate is False."
pars.refit = True
if pars.explicit_test_frac:
assert pars.refit or pars.ensemble, \
"For the evaluation of the model on the test set, 'refit' or 'ensemble' must be set."
if pars.optimize_lr:
pars.n_epochs = 100
pars.decay = 0.0
pars.save_model = False
pars.cross_validate = False
pars.refit = True
return pars
def read_dataset(filename: str, columns: list = None, subset_expr: str = None, input_feature_names: list = None,
trim_quantiles: list = None, qlo: float = 0.25, qhi: float = 0.75, plothist: bool = False,
histfig: str = "hist.png", dropna_cols: list = None, comment: str = '#', dtype=None):
"""
Loads, trims, and exports dataset to numpy arrays.
:param filename: str
The name of the input file.
:param columns: list of strings
Passed to the usecols parameter of pandas.read_csv()
:param subset_expr: str
Expression for subsetting the input data, passed as the first parameter of pandas.DataFrame.query()
:param input_feature_names: list of strings
An optional subset of the usecols parameter, including the names of the columns to be returned as features.
If None, all columns in usecols will be returned.
:param trim_quantiles: list
An optional subset of the usecols parameter, including the names of the columns to be threshold-rejected
beyond the quantiles specified by qlo and qhi. If None, no quantile-trimming will be performed.
:param qlo: float
Lower quantile for threshold rejection.
:param qhi: float
Upper quantile for threshold rejection.
:param plothist: bool
If True, the histograms of the columns in usecols will be plotted before and, if performed, after quantile trimming.
:param histfig: str
The name of the output histogram figure file if plothist is True.
    :param dropna_cols: list of column names; rows with missing values in any of these columns are dropped.
    :param comment: comment character passed to pandas.read_csv().
    :param dtype: column data type(s) passed to pandas.read_csv().
    :return: the (optionally trimmed) feature dataframe and the original dataframe.
"""
with open(filename) as f:
header = f.readline()
cols = header.strip('#').split()
    df = pd.read_csv(filename, names=cols, header=None, sep=r"\s+", usecols=columns, comment=comment, dtype=dtype)
if dropna_cols is not None:
df.dropna(inplace=True, subset=dropna_cols)
ndata = len(df)
print(df.head())
print("----------\n{} lines read from {}\n".format(ndata, filename))
df_orig = df
# Apply threshold rejections:
if subset_expr is not None:
df = df.query(subset_expr)
ndata = len(df)
print("{} lines after threshold rejections\n".format(ndata))
# plot histogram for each column in original dataset
if plothist:
        fig, ax = plt.subplots(figsize=(20, 10))
        _ = pd.DataFrame.hist(df, bins=int(np.ceil(np.cbrt(ndata) * 2)), figsize=(20, 10), grid=False, color='red',
                              ax=ax)
        fig.savefig(histfig)
# omit data beyond specific quantiles [qlo, qhi]
if trim_quantiles is not None:
dfq = df[trim_quantiles]
quantiles = pd.DataFrame.quantile(dfq, q=[qlo, qhi], axis=0, numeric_only=True, interpolation='linear')
print("Values at [{},{}] quantiles to be applied for data trimming:".format(qlo, qhi))
        print(quantiles)
mask = (dfq > dfq.quantile(qlo)) & (dfq < dfq.quantile(qhi))
# print(mask)
mask = mask.all(axis=1)
# print(mask.shape)
df = pd.DataFrame.dropna(df[mask])
ndata = len(df)
print("\n{} lines remained after quantile rejection.\n".format(ndata))
# plot histogram for each column in trimmed dataset
if plothist:
fig, ax = plt.subplots(figsize=(20, 10))
_ = pd.DataFrame.hist(df, bins=int(np.ceil(np.cbrt(ndata) * 2)), figsize=(20, 10), grid=False,
color='green', ax=ax)
fig.savefig("hist_trim.png", format="png")
if input_feature_names is not None:
return df.loc[:, input_feature_names], df_orig
else:
return df, df_orig
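# Example call (a sketch; the file and column names below are placeholders, not from
# the original pipeline):
#   df_feat, df_all = read_dataset('metadata.dat',
#                                  columns=['period', 'amplitude', 'feh'],
#                                  input_feature_names=['period', 'amplitude'],
#                                  trim_quantiles=['feh'], qlo=0.01, qhi=0.99)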
def read_time_series_for_rnn(name_list, source_dir, nts, input_wavebands, ts_file_suffix, rootdir="",
periods=None, max_phase=1.0, phase_shift=None, nbins=None):
print("Reading time series...", file=sys.stderr)
n_data = len(name_list)
scaler = StandardScaler(copy=True, with_mean=True, with_std=False)
X_list = list()
times_dict = dict()
mags_dict = dict()
phases_dict = dict()
if nbins is not None:
print("Light curves will be binned to max. {0} points in [0, {1:.1f}].".format(nbins, max_phase))
for iband, waveband in enumerate(input_wavebands):
X = np.zeros((n_data, nts, 2)) # Input shape required by an RNN: (batch_size, time_steps, features)
phases = list()
times = list()
mags = list()
if len(source_dir) > 1:
directory = source_dir[iband]
else:
directory = source_dir[0]
for ii, name in enumerate(name_list):
print('Reading data for {}\r'.format(name), end="", file=sys.stderr)
pp, mm = np.genfromtxt(os.path.join(rootdir, directory, name + ts_file_suffix[iband]),
unpack=True, comments='#')
phasemask = (pp < max_phase)
pp = pp[phasemask]
mm = mm[phasemask]
if phase_shift is not None:
pp = get_phases(1.0, pp, shift=phase_shift, all_positive=True)
inds = np.argsort(pp)
pp = pp[inds]
mm = mm[inds]
if nbins is not None:
pp, mm = binlc(pp, mm, nbins=nbins, max_y=max_phase)
if periods is not None:
tt = pp * periods[ii]
else:
tt = pp
# here we only subtract the mean:
mm = scaler.fit_transform(mm.reshape(-1, 1)).flatten()
times.append(tt)
mags.append(mm)
phases.append(pp)
times_padded = pad_sequences(times, maxlen=nts, dtype='float64', padding='post', truncating='post', value=-1)
mags_padded = pad_sequences(mags, maxlen=nts, dtype='float64', padding='post', truncating='post', value=-1)
X[:, :, 0] = times_padded
X[:, :, 1] = mags_padded
X_list.append(X)
times_dict[waveband] = times
mags_dict[waveband] = mags
phases_dict[waveband] = phases
# Create final data matrix for the time series:
X = np.concatenate(X_list, axis=2)
print("")
return X, times_dict, mags_dict, phases_dict
def read_time_series_for_cnn(name_list, source_dir, nts, input_wavebands, ts_file_suffix, nuse=1,
rootdir="", n_aug=None):
nmags = int(nts / nuse)
n_data = len(name_list)
if n_aug is not None:
assert isinstance(n_aug, int) and n_aug > 0, \
"n_aug must be a positive integer"
dict_x_ts = dict()
for waveband in input_wavebands:
dict_x_ts[waveband] = np.zeros((n_data, nmags))
if n_aug is not None:
dict_x_ts[waveband] = np.zeros((n_data * n_aug, nmags))
groups = np.zeros((n_data * n_aug))
dict_x_ts_scaled = dict()
print("Reading time series...", file=sys.stderr)
for ii, name in enumerate(name_list):
print('Reading data for {}\r'.format(name), end="", file=sys.stderr)
for iband, waveband in enumerate(input_wavebands):
if len(source_dir) > 1:
directory = source_dir[iband]
else:
directory = source_dir[0]
if n_aug is None:
phases, timeseries = np.genfromtxt(os.path.join(directory, name + ts_file_suffix[iband]),
unpack=True, comments='#')
phases = phases[0:nts]
timeseries = timeseries[0:nts]
dict_x_ts[waveband][ii][:] = timeseries[nuse - 1::nuse]
groups = None
else:
tsinput = np.genfromtxt(os.path.join(directory, name + ts_file_suffix[iband]),
unpack=False, comments='#')
# check if there are n_aug+1 columns in the data matrix
assert tsinput.shape[1] == n_aug + 1, \
"data matrix in " + os.path.join(directory, name + ts_file_suffix[iband]) + " has wrong shape"
phases = tsinput[0:nts, 0]
for jj in range(n_aug):
timeseries = tsinput[0:nts, jj + 1]
dict_x_ts[waveband][jj + ii * n_aug][:] = timeseries[nuse - 1::nuse]
groups[jj + ii * n_aug] = ii
phases = phases[nuse - 1::nuse]
# Scale the time series to the [0,1] range
scaler = MinMaxScaler(copy=True, feature_range=(0, 1))
ts_list = list()
for ii, waveband in enumerate(input_wavebands):
scaler.fit(dict_x_ts[waveband].T)
dict_x_ts_scaled[waveband] = (scaler.transform(dict_x_ts[waveband].T)).T
ts_list.append(np.expand_dims(dict_x_ts_scaled[waveband], axis=2))
# Create final data matrix for the time series:
X = np.concatenate(ts_list, axis=2)
print("")
return X, dict_x_ts, dict_x_ts_scaled, phases, groups
def cross_validate(model, folds: list, x_list: list or tuple, y,
model_kwargs: dict = {}, compile_kwargs: dict = {},
initial_weights: list = None,
sample_weight_fit=None, sample_weight_eval=None, ids=None,
indices_to_scale: list or tuple = None, scaler=None,
n_epochs: int = 1, batch_size: int = None, shuffle=True, verbose: int = 0,
callbacks: list = [], metrics: list or tuple = None,
log_training=True, log_prefix='', pick_fold: list or tuple = None,
save_data=True, rootdir='.', filename_train='train.dat', filename_val='val.dat',
strategy=None, n_devices=1, validation_freq=1, seed=1):
# Initialize variables:
histories = list()
model_weights = list()
scalers_folds = list()
Y_train_collected = np.array([])
Y_val_collected = np.array([])
Y_train_pred_collected = np.array([])
Y_val_pred_collected = np.array([])
fitting_weights_train_collected = np.array([])
fitting_weights_val_collected = np.array([])
eval_weights_train_collected = np.array([])
eval_weights_val_collected = np.array([])
ids_train_collected = np.array([])
ids_val_collected = np.array([])
numcv_t = np.array([])
numcv_v = np.array([])
# callbacks.append(PrintLearningRate())
if ids is None:
# create IDs by simply numbering the data
ids = np.linspace(1, y.shape[0], y.shape[0]).astype(int)
first_fold = True
for i_cv, (train_index, val_index) in enumerate(folds):
# if pick_fold is not None and pick_fold != i_cv + 1:
if pick_fold is not None and i_cv + 1 not in pick_fold:
continue
# Create and compile the model:
tf.keras.backend.clear_session()
tf.random.set_seed(seed)
if strategy is not None:
# Apply distributed strategy on model if multiple devices are present:
with strategy.scope():
model_ = model(**model_kwargs)
else:
model_ = model(**model_kwargs)
if first_fold:
first_fold = False
model_.summary()
if initial_weights is None:
initial_weights = model_.get_weights()
else:
# Initialize model weights:
model_.set_weights(initial_weights)
if strategy is not None:
with strategy.scope():
model_.compile(**compile_kwargs)
else:
model_.compile(**compile_kwargs)
print("fold " + str(i_cv + 1) + "/" + str(len(folds)))
print("n_train = {} ; n_val = {}".format(train_index.shape[0], val_index.shape[0]))
if log_training:
callbacks_fold = callbacks + [tf.keras.callbacks.CSVLogger(
os.path.join(rootdir, log_prefix + f"_fold{i_cv + 1}.log"))]
else:
callbacks_fold = callbacks
# --------------------------------------------------
# Split the arrays to training and validations sets:
x_train_list = list()
x_val_list = list()
scalers = list()
for i, x in enumerate(x_list):
x_t, x_v = x[train_index], x[val_index]
if indices_to_scale is not None and i in indices_to_scale:
scaler.fit(x_t)
x_t = scaler.transform(x_t)
x_v = scaler.transform(x_v)
scalers.append(scaler.copy())
x_train_list.append(x_t)
x_val_list.append(x_v)
y_train, y_val = y[train_index], y[val_index]
if sample_weight_fit is not None:
fitting_weights_train, fitting_weights_val = sample_weight_fit[train_index], sample_weight_fit[val_index]
else:
fitting_weights_train, fitting_weights_val = None, None
if sample_weight_eval is not None:
eval_weights_train, eval_weights_val = sample_weight_eval[train_index], sample_weight_eval[val_index]
else:
eval_weights_train, eval_weights_val = None, None
ids_t, ids_v = ids[train_index], ids[val_index]
# --------------------------------------------------
# Fit and evaluate the model for this fold:
history = model_.fit(x=x_train_list, y=y_train, sample_weight=fitting_weights_train,
epochs=n_epochs, initial_epoch=0, batch_size=batch_size, shuffle=shuffle,
validation_data=(x_val_list, y_val, fitting_weights_val),
verbose=verbose, callbacks=callbacks_fold, validation_freq=validation_freq)
Y_train_pred = (model_.predict(x_train_list)).flatten()
Y_val_pred = (model_.predict(x_val_list)).flatten()
histories.append(history)
model_weights.append(model_.get_weights())
scalers_folds.append(scalers.copy())
# --------------------------------------------------
# Append the values of this fold to those from the previous fold(s).
Y_train_collected = np.hstack((Y_train_collected, y_train))
Y_val_collected = np.hstack((Y_val_collected, y_val))
Y_train_pred_collected = np.hstack((Y_train_pred_collected, Y_train_pred))
Y_val_pred_collected = np.hstack((Y_val_pred_collected, Y_val_pred))
if sample_weight_fit is not None:
fitting_weights_train_collected = np.hstack((fitting_weights_train_collected, fitting_weights_train))
            fitting_weights_val_collected = np.hstack((fitting_weights_val_collected, fitting_weights_val))
# pylint: disable=E1101
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import average_precision_score, confusion_matrix
import numpy as np
from physionet import PhysioNet, get_data_min_max, variable_time_collate_fn2
from sklearn import model_selection
from sklearn import metrics
from sklearn.metrics import precision_score, recall_score, f1_score
from person_activity import PersonActivity
def one_hot(y_):
# Function to encode output labels from number indexes
# e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
y_ = y_.reshape(len(y_))
y_ = [int(x) for x in y_]
n_values = np.max(y_) + 1
return np.eye(n_values)[np.array(y_, dtype=np.int32)]
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def log_normal_pdf(x, mean, logvar, mask):
const = torch.from_numpy(np.array([2. * np.pi])).float().to(x.device)
const = torch.log(const)
return -.5 * (const + logvar + (x - mean) ** 2. / torch.exp(logvar)) * mask
def normal_kl(mu1, lv1, mu2, lv2):
v1 = torch.exp(lv1)
v2 = torch.exp(lv2)
lstd1 = lv1 / 2.
lstd2 = lv2 / 2.
kl = lstd2 - lstd1 + ((v1 + (mu1 - mu2) ** 2.) / (2. * v2)) - .5
return kl
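# For reference: with v = exp(lv), the expression above is the closed-form KL
# divergence between diagonal Gaussians, applied elementwise:
#   KL(N(mu1, v1) || N(mu2, v2)) = log(std2 / std1) + (v1 + (mu1 - mu2)**2) / (2 * v2) - 1/2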
def mean_squared_error(orig, pred, mask):
error = (orig - pred) ** 2
error = error * mask
return error.sum() / mask.sum()
def normalize_masked_data(data, mask, att_min, att_max):
    # avoid division by zero: attributes with zero range get att_max = 1
    att_max[att_max == 0.] = 1.
if (att_max != 0.).all():
data_norm = (data - att_min) / att_max
else:
raise Exception("Zero!")
if torch.isnan(data_norm).any():
raise Exception("nans!")
# set masked out elements back to zero
data_norm[mask == 0] = 0
return data_norm, att_min, att_max
def evaluate(dim, rec, dec, test_loader, args, num_sample=10, device="cuda"):
mse, test_n = 0.0, 0.0
with torch.no_grad():
for test_batch in test_loader:
test_batch = test_batch.to(device)
observed_data, observed_mask, observed_tp = (
test_batch[:, :, :dim],
test_batch[:, :, dim: 2 * dim],
test_batch[:, :, -1],
)
if args.sample_tp and args.sample_tp < 1:
subsampled_data, subsampled_tp, subsampled_mask = subsample_timepoints(
observed_data.clone(), observed_tp.clone(), observed_mask.clone(), args.sample_tp)
else:
subsampled_data, subsampled_tp, subsampled_mask = \
observed_data, observed_tp, observed_mask
out = rec(torch.cat((subsampled_data, subsampled_mask), 2), subsampled_tp)
qz0_mean, qz0_logvar = (
out[:, :, : args.latent_dim],
out[:, :, args.latent_dim:],
)
epsilon = torch.randn(
num_sample, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.shape[2]
).to(device)
z0 = epsilon * torch.exp(0.5 * qz0_logvar) + qz0_mean
z0 = z0.view(-1, qz0_mean.shape[1], qz0_mean.shape[2])
batch, seqlen = observed_tp.size()
time_steps = (
observed_tp[None, :, :].repeat(num_sample, 1, 1).view(-1, seqlen)
)
pred_x = dec(z0, time_steps)
pred_x = pred_x.view(num_sample, -1, pred_x.shape[1], pred_x.shape[2])
pred_x = pred_x.mean(0)
mse += mean_squared_error(observed_data, pred_x, observed_mask) * batch
test_n += batch
return mse / test_n
def compute_losses(dim, dec_train_batch, qz0_mean, qz0_logvar, pred_x, args, device):
observed_data, observed_mask \
= dec_train_batch[:, :, :dim], dec_train_batch[:, :, dim:2*dim]
noise_std = args.std
noise_std_ = torch.zeros(pred_x.size()).to(device) + noise_std
noise_logvar = 2. * torch.log(noise_std_).to(device)
logpx = log_normal_pdf(observed_data, pred_x, noise_logvar,
observed_mask).sum(-1).sum(-1)
pz0_mean = pz0_logvar = torch.zeros(qz0_mean.size()).to(device)
analytic_kl = normal_kl(qz0_mean, qz0_logvar,
pz0_mean, pz0_logvar).sum(-1).sum(-1)
if args.norm:
logpx /= observed_mask.sum(-1).sum(-1)
analytic_kl /= observed_mask.sum(-1).sum(-1)
return logpx, analytic_kl
def evaluate_classifier(model, test_loader, dec=None, args=None, classifier=None,
dim=41, device='cuda', reconst=False, num_sample=1, dataset='P12'):
pred = []
true = []
test_loss = 0
for test_batch, label in test_loader:
test_batch, label = test_batch.to(device), label.to(device)
batch_len = test_batch.shape[0]
observed_data, observed_mask, observed_tp \
= test_batch[:, :, :dim], test_batch[:, :, dim:2*dim], test_batch[:, :, -1]
with torch.no_grad():
out = model(
torch.cat((observed_data, observed_mask), 2), observed_tp)
if reconst:
qz0_mean, qz0_logvar = out[:, :,
:args.latent_dim], out[:, :, args.latent_dim:]
epsilon = torch.randn(
num_sample, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.shape[2]).to(device)
z0 = epsilon * torch.exp(.5 * qz0_logvar) + qz0_mean
z0 = z0.view(-1, qz0_mean.shape[1], qz0_mean.shape[2])
if args.classify_pertp:
pred_x = dec(z0, observed_tp[None, :, :].repeat(
num_sample, 1, 1).view(-1, observed_tp.shape[1]))
out = classifier(pred_x)
else:
out = classifier(z0)
if args.classify_pertp:
N = label.size(-1)
out = out.view(-1, N)
label = label.view(-1, N)
_, label = label.max(-1)
test_loss += nn.CrossEntropyLoss()(out, label.long()).item() * batch_len * 50.
else:
label = label.unsqueeze(0).repeat_interleave(
num_sample, 0).view(-1)
test_loss += nn.CrossEntropyLoss()(out, label).item() * batch_len * num_sample
pred.append(out.cpu().numpy())
true.append(label.cpu().numpy())
pred = np.concatenate(pred, 0)
true = np.concatenate(true, 0)
acc = np.mean(pred.argmax(1) == true)
if dataset == 'P12' or dataset == 'P19' or dataset == 'eICU':
auc = metrics.roc_auc_score(true, pred[:, 1]) if not args.classify_pertp else 0.
aupr = average_precision_score(true, pred[:, 1]) if not args.classify_pertp else 0.
return test_loss / pred.shape[0], acc, auc, aupr, None, None, None
elif dataset == 'PAM':
auc = metrics.roc_auc_score(one_hot(true), pred) if not args.classify_pertp else 0.
aupr = average_precision_score(one_hot(true), pred) if not args.classify_pertp else 0.
precision = precision_score(true, pred.argmax(1), average='macro', ) if not args.classify_pertp else 0.
recall = recall_score(true, pred.argmax(1), average='macro', ) if not args.classify_pertp else 0.
F1 = 2 * (precision * recall) / (precision + recall) if not args.classify_pertp else 0.
return test_loss/pred.shape[0], acc, auc, aupr, precision, recall, F1
def random_sample(idx_0, idx_1, batch_size):
"""
Returns a balanced sample by randomly sampling without replacement.
:param idx_0: indices of negative samples
:param idx_1: indices of positive samples
:param batch_size: batch size
:return: indices of balanced batch of negative and positive samples
"""
idx0_batch = np.random.choice(idx_0, size=int(batch_size / 2), replace=False)
idx1_batch = np.random.choice(idx_1, size=int(batch_size / 2), replace=False)
idx = np.concatenate([idx0_batch, idx1_batch], axis=0)
return idx
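# Usage sketch (the label array is hypothetical, for illustration only):
#   y = np.array([0, 0, 0, 1, 1, 0, 1, 0])
#   idx = random_sample(np.where(y == 0)[0], np.where(y == 1)[0], batch_size=4)
#   # idx holds 2 randomly chosen negative and 2 positive sample indices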
def preprocess_P19(PT_dict, arr_outcomes, labels_ts):
total = []
for i, patient in enumerate(PT_dict):
length = patient['length']
record_id = patient['id']
tt = torch.squeeze(torch.tensor(patient['time'][:length]), 1)
vals = torch.tensor(patient['arr'][:length, :], dtype=torch.float32)
m = np.zeros(shape=patient['arr'][:length, :].shape)
m[patient['arr'][:length, :].nonzero()] = 1
mask = torch.tensor(m, dtype=torch.float32)
outcome = torch.tensor(arr_outcomes[i][0], dtype=torch.float32)
total.append((record_id, tt, vals, mask, outcome))
'''
# calculate and save P19 statistics - age, gender, density scores (can be used for all algorithms)
idx_under_65 = []
idx_over_65 = []
idx_male = []
idx_female = []
for i in range(len(PT_dict)):
if total[i][0] == PT_dict[i]['id']:
age, gender, _, _, _, _ = PT_dict[i]['extended_static']
if age > 0:
if age < 65:
idx_under_65.append(i)
else:
idx_over_65.append(i)
if gender == 0:
idx_female.append(i)
if gender == 1:
idx_male.append(i)
np.save('P19_idx_under_65.npy', np.array(idx_under_65), allow_pickle=True)
np.save('P19_idx_over_65.npy', np.array(idx_over_65), allow_pickle=True)
np.save('P19_idx_male.npy', np.array(idx_male), allow_pickle=True)
np.save('P19_idx_female.npy', np.array(idx_female), allow_pickle=True)
# save density scores
X_features = np.array([d['arr'] for d in PT_dict])
counts = np.count_nonzero(X_features, axis=(0, 1))
ascending_indices = np.argsort(counts)
density_scores = counts / (X_features.shape[0] * 60)
res = [[ind, density_scores[ind], labels_ts[:-1][ind]] for ind in ascending_indices]
np.save('P19_density_scores.npy', res, allow_pickle=True)
'''
return total
def preprocess_eICU(PT_dict, arr_outcomes, labels_ts):
total = []
for i, patient in enumerate(PT_dict):
record_id = str(i)
tt = torch.squeeze(torch.tensor(patient['time']), 1)
vals = torch.tensor(patient['arr'], dtype=torch.float32)
m = np.zeros(shape=patient['arr'].shape)
m[patient['arr'].nonzero()] = 1
mask = torch.tensor(m, dtype=torch.float32)
outcome = torch.tensor(arr_outcomes[i], dtype=torch.float32)
total.append((record_id, tt, vals, mask, outcome))
'''
# calculate and save P19 statistics - gender, density scores (can be used for all algorithms)
idx_male = []
idx_female = []
for i in range(len(PT_dict)):
if total[i][0] == str(i):
vec = PT_dict[i]['extended_static']
if vec[-3] > 0:
idx_female.append(i)
if vec[-4] > 0:
idx_male.append(i)
print('\nOnly 1,329/36,443 samples have gender data available.\n')
np.save('eICU_idx_male.npy', np.array(idx_male), allow_pickle=True)
np.save('eICU_idx_female.npy', np.array(idx_female), allow_pickle=True)
# save density scores
X_features = np.array([d['arr'] for d in PT_dict])
counts = np.count_nonzero(X_features, axis=(0, 1))
ascending_indices = np.argsort(counts)
density_scores = counts / (X_features.shape[0] * 300)
res = [[ind, density_scores[ind], labels_ts[ind]] for ind in ascending_indices]
np.save('eICU_density_scores.npy', res, allow_pickle=True)
'''
return total
def preprocess_PAM(PT_dict, arr_outcomes):
length = 600
total = []
for i, patient in enumerate(PT_dict):
record_id = str(i)
tt = torch.tensor(list(range(length)))
vals = torch.tensor(patient, dtype=torch.float32)
m = np.zeros(shape=patient.shape)
m[patient.nonzero()] = 1
mask = torch.tensor(m, dtype=torch.float32)
outcome = torch.tensor(arr_outcomes[i][0], dtype=torch.float32)
total.append((record_id, tt, vals, mask, outcome))
return total
def random_sample_8(ytrain, B, replace=False):
""" Returns a balanced sample of tensors by randomly sampling without replacement. """
idx_batches = [np.random.choice(np.where(ytrain == c)[0], size=int(B / 8), replace=replace) for c in range(8)]
idx = np.concatenate(idx_batches, axis=0)
return idx
def get_data(args, dataset, device, q, upsampling_batch, split_type, feature_removal_level, missing_ratio, flag=1,
reverse=False, predictive_label='mortality'):
if dataset == 'P12':
train_dataset_obj_1 = PhysioNet('data/physionet', train=True,
quantization=q,
download=True, n_samples=12000,
device=device, set_letter='a')
train_dataset_obj_2 = PhysioNet('data/physionet', train=True,
quantization=q,
download=True, n_samples=12000,
device=device, set_letter='b')
train_dataset_obj_3 = PhysioNet('data/physionet', train=True,
quantization=q,
download=True, n_samples=12000,
device=device, set_letter='c')
dataset_1 = train_dataset_obj_1[:len(train_dataset_obj_1)]
dataset_2 = train_dataset_obj_2[:len(train_dataset_obj_2)]
dataset_3 = train_dataset_obj_3[:len(train_dataset_obj_3)]
total_dataset = dataset_1 + dataset_2 + dataset_3
if predictive_label == 'LoS':
los_outcomes = np.load('../saved/LoS_y1_out.npy', allow_pickle=True)
for i, tpl in enumerate(total_dataset):
a, b, c, d, _ = tpl
los_label = los_outcomes[i][0]
los_label = torch.tensor(los_label, dtype=torch.float32)
total_dataset[i] = (a, b, c, d, los_label)
'''
# calculate and save statistics
idx_under_65 = []
idx_over_65 = []
idx_male = []
idx_female = []
P_list = np.load('P_list.npy', allow_pickle=True)
for i in range(len(P_list)):
if total_dataset[i][0] == P_list[i]['id']:
age, gender, _, _, _ = P_list[i]['static']
if age > 0:
if age < 65:
idx_under_65.append(i)
else:
idx_over_65.append(i)
if gender == 0:
idx_female.append(i)
if gender == 1:
idx_male.append(i)
np.save('mtand_idx_under_65.npy', np.array(idx_under_65), allow_pickle=True)
np.save('mtand_idx_over_65.npy', np.array(idx_over_65), allow_pickle=True)
np.save('mtand_idx_male.npy', np.array(idx_male), allow_pickle=True)
np.save('mtand_idx_female.npy', np.array(idx_female), allow_pickle=True)
'''
elif dataset == 'P19':
PT_dict = np.load('../../../P19data/processed_data/PT_dict_list_6.npy', allow_pickle=True)
labels_ts = np.load('../../../P19data/processed_data/labels_ts.npy', allow_pickle=True)
labels_demogr = np.load('../../../P19data/processed_data/labels_demogr.npy', allow_pickle=True)
arr_outcomes = np.load('../../../P19data/processed_data/arr_outcomes_6.npy', allow_pickle=True)
total_dataset = preprocess_P19(PT_dict, arr_outcomes, labels_ts)
elif dataset == 'eICU':
PT_dict = np.load('../../../eICUdata/processed_data/PTdict_list.npy', allow_pickle=True)
labels_ts = np.load('../../../eICUdata/processed_data/eICU_ts_vars.npy', allow_pickle=True)
labels_demogr = np.load('../../../eICUdata/processed_data/eICU_static_vars.npy', allow_pickle=True)
arr_outcomes = np.load('../../../eICUdata/processed_data/arr_outcomes.npy', allow_pickle=True)
total_dataset = preprocess_eICU(PT_dict, arr_outcomes, labels_ts)
elif dataset == 'PAM':
PT_dict = np.load('../../../PAMdata/processed_data/PTdict_list.npy', allow_pickle=True)
arr_outcomes = np.load('../../../PAMdata/processed_data/arr_outcomes.npy', allow_pickle=True)
total_dataset = preprocess_PAM(PT_dict, arr_outcomes)
print('len(total_dataset):', len(total_dataset))
if split_type == 'random':
# Shuffle and split
train_data, test_data = model_selection.train_test_split(total_dataset, train_size=0.9, # 80% train, 10% validation, 10% test
shuffle=True)
elif split_type == 'age' or split_type == 'gender':
if dataset == 'P12':
prefix = 'mtand'
elif dataset == 'P19':
prefix = 'P19'
elif dataset == 'eICU': # possible only with split_type == 'gender'
prefix = 'eICU'
if split_type == 'age':
if dataset == 'eICU':
print('\nCombination of eICU dataset and age split is not possible.\n')
if reverse == False:
idx_train = np.load('%s_idx_under_65.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_over_65.npy' % prefix, allow_pickle=True)
else:
idx_train = np.load('%s_idx_over_65.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_under_65.npy' % prefix, allow_pickle=True)
elif split_type == 'gender':
if reverse == False:
idx_train = np.load('%s_idx_male.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_female.npy' % prefix, allow_pickle=True)
else:
idx_train = np.load('%s_idx_female.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_male.npy' % prefix, allow_pickle=True)
np.random.shuffle(idx_train)
np.random.shuffle(idx_vt)
train_data = [total_dataset[i] for i in idx_train]
test_data = [total_dataset[i] for i in idx_vt]
record_id, tt, vals, mask, labels = train_data[0]
input_dim = vals.size(-1)
data_min, data_max = get_data_min_max(total_dataset, device)
batch_size = 128
if flag:
if args.classif:
if split_type == 'random':
train_data, val_data = model_selection.train_test_split(train_data, train_size=0.8889, shuffle=True) # 80% train, 10% validation, 10% test
elif split_type == 'age' or split_type == 'gender':
val_data, test_data = model_selection.train_test_split(test_data, train_size=0.5, shuffle=False)
if dataset == 'P12':
num_all_features = 36
elif dataset == 'P19':
num_all_features = 34
elif dataset == 'eICU':
num_all_features = 14
elif dataset == 'PAM':
num_all_features = 17
num_missing_features = round(missing_ratio * num_all_features)
if feature_removal_level == 'sample':
for i, tpl in enumerate(val_data):
idx = np.random.choice(num_all_features, num_missing_features, replace=False)
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
val_data[i] = tuple(tpl)
for i, tpl in enumerate(test_data):
idx = np.random.choice(num_all_features, num_missing_features, replace=False)
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
test_data[i] = tuple(tpl)
elif feature_removal_level == 'set':
if dataset == 'P12':
dict_params = train_dataset_obj_1.params_dict
density_scores_names = np.load('../saved/IG_density_scores_P12.npy', allow_pickle=True)[:, 1]
idx = [dict_params[name] for name in density_scores_names[:num_missing_features]]
elif dataset == 'P19':
labels_ts = np.load('../../../P19data/processed_data/labels_ts.npy', allow_pickle=True)
dict_params = {label: i for i, label in enumerate(labels_ts[:-1])}
density_scores_names = np.load('../saved/IG_density_scores_P19.npy', allow_pickle=True)[:, 1]
idx = [dict_params[name] for name in density_scores_names[:num_missing_features]]
elif dataset == 'eICU':
labels_ts = np.load('../../../eICUdata/processed_data/eICU_ts_vars.npy', allow_pickle=True)
dict_params = {label: i for i, label in enumerate(labels_ts)}
density_scores_names = np.load('../saved/IG_density_scores_eICU.npy', allow_pickle=True)[:, 1]
idx = [dict_params[name] for name in density_scores_names[:num_missing_features]]
elif dataset == 'PAM':
density_scores_indices = np.load('../saved/IG_density_scores_PAM.npy', allow_pickle=True)[:, 0]
idx = list(map(int, density_scores_indices[:num_missing_features]))
for i, tpl in enumerate(val_data):
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
val_data[i] = tuple(tpl)
for i, tpl in enumerate(test_data):
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
test_data[i] = tuple(tpl)
if upsampling_batch:
train_data_upsampled = []
true_labels = np.array(list(map(lambda x: float(x[7]), np.array(train_data)[:, 4])))
if dataset == 'P12' or dataset == 'P19' or dataset == 'eICU': # 2 classes
idx_0 = np.where(true_labels == 0)[0]
idx_1 = np.where(true_labels == 1)[0]
for _ in range(len(true_labels) // batch_size):
indices = random_sample(idx_0, idx_1, batch_size)
for i in indices:
train_data_upsampled.append(train_data[i])
elif dataset == 'PAM': # 8 classes
for _ in range(len(true_labels) // batch_size):
indices = random_sample_8(true_labels, batch_size)
for i in indices:
train_data_upsampled.append(train_data[i])
train_data = train_data_upsampled
test_data_combined = variable_time_collate_fn(test_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
train_data_combined = variable_time_collate_fn(train_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
val_data_combined = variable_time_collate_fn(
val_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
print(train_data_combined[1].sum(
), val_data_combined[1].sum(), test_data_combined[1].sum())
print(train_data_combined[0].size(), train_data_combined[1].size(),
val_data_combined[0].size(), val_data_combined[1].size(),
test_data_combined[0].size(), test_data_combined[1].size())
train_data_combined = TensorDataset(
train_data_combined[0], train_data_combined[1].long().squeeze())
val_data_combined = TensorDataset(
val_data_combined[0], val_data_combined[1].long().squeeze())
test_data_combined = TensorDataset(
test_data_combined[0], test_data_combined[1].long().squeeze())
else:
train_data_combined = variable_time_collate_fn(
train_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
train_dataloader = DataLoader(
train_data_combined, batch_size=batch_size, shuffle=False)
test_dataloader = DataLoader(
test_data_combined, batch_size=batch_size, shuffle=False)
else:
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=False,
collate_fn=lambda batch: variable_time_collate_fn2(batch, args, device, data_type="train",
data_min=data_min, data_max=data_max))
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False,
collate_fn=lambda batch: variable_time_collate_fn2(batch, args, device, data_type="test",
data_min=data_min, data_max=data_max))
data_objects = {"dataset_obj": {},
"train_dataloader": train_dataloader,
"test_dataloader": test_dataloader,
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"attr": {}, # optional
"classif_per_tp": False, # optional
"n_labels": 1} # optional
if args.classif:
val_dataloader = DataLoader(
val_data_combined, batch_size=batch_size, shuffle=False)
data_objects["val_dataloader"] = val_dataloader
return data_objects
def variable_time_collate_fn(batch, device=torch.device("cpu"), classify=False, activity=False,
data_min=None, data_max=None):
"""
Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
- record_id is a patient id
- tt is a 1-dimensional tensor containing T time values of observations.
- vals is a (T, D) tensor containing observed values for D variables.
- mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
- labels is a list of labels for the current patient, if labels are available. Otherwise None.
Returns:
combined_data: (M, T, 2*D + 1) tensor that concatenates, along the last axis,
the observed values (M, T, D), the observation mask (M, T, D) and the
normalized time points (M, T, 1), where M is the batch size and T the
maximum sequence length. If classify is True, per-sample labels are returned as well.
"""
D = batch[0][2].shape[1]
# number of labels
N = batch[0][-1].shape[1] if activity else 1
len_tt = [ex[1].size(0) for ex in batch]
maxlen = np.max(len_tt)
enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)
enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)
enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)
if classify:
if activity:
combined_labels = torch.zeros([len(batch), maxlen, N]).to(device)
else:
combined_labels = torch.zeros([len(batch), N]).to(device)
for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
currlen = tt.size(0)
enc_combined_tt[b, :currlen] = tt.to(device)
enc_combined_vals[b, :currlen] = vals.to(device)
enc_combined_mask[b, :currlen] = mask.to(device)
if classify:
if activity:
combined_labels[b, :currlen] = labels.to(device)
else:
if labels is not None:
combined_labels[b] = labels.to(device)
if not activity:
enc_combined_vals, _, _ = normalize_masked_data(enc_combined_vals, enc_combined_mask,
att_min=data_min, att_max=data_max)
if torch.max(enc_combined_tt) != 0.:
enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)
combined_data = torch.cat(
(enc_combined_vals, enc_combined_mask, enc_combined_tt.unsqueeze(-1)), 2)
if classify:
return combined_data, combined_labels
else:
return combined_data
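# Shape sketch (hedged example with made-up sizes): two series with D=3
# variables and at most 5 time points collate into a (2, 5, 2*3 + 1) tensor.
#     batch = [('id0', torch.rand(5), torch.rand(5, 3), torch.ones(5, 3), torch.tensor([1.])),
#              ('id1', torch.rand(4), torch.rand(4, 3), torch.ones(4, 3), torch.tensor([0.]))]
#     data, labels = variable_time_collate_fn(batch, classify=True,
#                                             data_min=torch.zeros(3), data_max=torch.ones(3))
#     assert data.shape == (2, 5, 7)   # (batch, maxlen, 2*D + 1)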
def get_activity_data(args, device):
n_samples = min(10000, args.n)
dataset_obj = PersonActivity('data/PersonActivity',
download=True, n_samples=n_samples, device=device)
print(dataset_obj)
train_data, test_data = model_selection.train_test_split(dataset_obj, train_size=0.8,
random_state=42, shuffle=True)
record_id, tt, vals, mask, labels = train_data[0]
input_dim = vals.size(-1)
batch_size = min(min(len(dataset_obj), args.batch_size), args.n)
test_data_combined = variable_time_collate_fn(test_data, device, classify=args.classif,
activity=True)
train_data, val_data = model_selection.train_test_split(train_data, train_size=0.8,
random_state=11, shuffle=True)
train_data_combined = variable_time_collate_fn(
train_data, device, classify=args.classif, activity=True)
val_data_combined = variable_time_collate_fn(
val_data, device, classify=args.classif, activity=True)
print(train_data_combined[1].sum(
), val_data_combined[1].sum(), test_data_combined[1].sum())
print(train_data_combined[0].size(), train_data_combined[1].size(),
val_data_combined[0].size(), val_data_combined[1].size(),
test_data_combined[0].size(), test_data_combined[1].size())
train_data_combined = TensorDataset(
train_data_combined[0], train_data_combined[1].long())
val_data_combined = TensorDataset(
val_data_combined[0], val_data_combined[1].long())
test_data_combined = TensorDataset(
test_data_combined[0], test_data_combined[1].long())
train_dataloader = DataLoader(
train_data_combined, batch_size=batch_size, shuffle=False)
test_dataloader = DataLoader(
test_data_combined, batch_size=batch_size, shuffle=False)
val_dataloader = DataLoader(
val_data_combined, batch_size=batch_size, shuffle=False)
data_objects = {"train_dataloader": train_dataloader,
"test_dataloader": test_dataloader,
"val_dataloader": val_dataloader,
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"classif_per_tp": False, # optional
"n_labels": 1} # optional
return data_objects
def irregularly_sampled_data_gen(n=10, length=20, seed=0):
np.random.seed(seed)
obs_values, ground_truth, obs_times = [], [], []
for i in range(n):
t1 = np.sort(np.random.uniform(low=0.0, high=1.0, size=length))
t2 = np.sort(np.random.uniform(low=0.0, high=1.0, size=length))
t3 = np.sort(np.random.uniform(low=0.0, high=1.0, size=length))
a = 10 * np.random.randn()
b = 10 * np.random.rand()
f1 = .8 * np.sin(20*(t1+a) + np.sin(20*(t1+a))) + \
0.01 * np.random.randn()
f2 = -.5 * np.sin(20*(t2+a + 20) + np.sin(20*(t2+a + 20))
) + 0.01 * np.random.randn()
f3 = np.sin(12*(t3+b)) + 0.01 * np.random.randn()
obs_times.append(np.stack((t1, t2, t3), axis=0))
obs_values.append(np.stack((f1, f2, f3), axis=0))
t = np.linspace(0, 1, 100)
fg1 = .8 * np.sin(20*(t+a) + np.sin(20*(t+a)))
fg2 = -.5 * np.sin(20*(t+a + 20) + np.sin(20*(t+a + 20)))
fg3 = np.sin(12*(t+b))
ground_truth.append(np.stack((fg1, fg2, fg3), axis=0))
return obs_values, ground_truth, obs_times
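# Shape sketch: each sample carries 3 channels observed at `length` irregular
# time points, plus a dense 100-point ground truth per channel.
#     obs_values, ground_truth, obs_times = irregularly_sampled_data_gen(n=2, length=20)
#     assert np.stack(obs_values).shape == (2, 3, 20)
#     assert np.stack(ground_truth).shape == (2, 3, 100)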
def sine_wave_data_gen(args, seed=0):
np.random.seed(seed)
obs_values, ground_truth, obs_times = [], [], []
for _ in range(args.n):
t = np.sort(np.random.choice(np.linspace(
0, 1., 101), size=args.length, replace=True))
b = 10 * np.random.rand()
f = np.sin(12*(t+b)) + 0.1 * np.random.randn()
obs_times.append(t)
obs_values.append(f)
tc = np.linspace(0, 1, 100)
fg = np.sin(12*(tc + b))
ground_truth.append(fg)
obs_values = np.array(obs_values)
obs_times = np.array(obs_times)
ground_truth = np.array(ground_truth)
print(obs_values.shape, obs_times.shape, ground_truth.shape)
mask = np.ones_like(obs_values)
combined_data = np.concatenate((np.expand_dims(obs_values, axis=2), np.expand_dims(
mask, axis=2), np.expand_dims(obs_times, axis=2)), axis=2)
print(combined_data.shape)
print(combined_data[0])
train_data, test_data = model_selection.train_test_split(combined_data, train_size=0.8,
random_state=42, shuffle=True)
print(train_data.shape, test_data.shape)
train_dataloader = DataLoader(torch.from_numpy(
train_data).float(), batch_size=args.batch_size, shuffle=False)
test_dataloader = DataLoader(torch.from_numpy(
test_data).float(), batch_size=args.batch_size, shuffle=False)
data_objects = {"dataset_obj": combined_data,
"train_dataloader": train_dataloader,
"test_dataloader": test_dataloader,
"input_dim": 1,
"ground_truth": np.array(ground_truth)}
return data_objects
def kernel_smoother_data_gen(args, alpha=100., seed=0, ref_points=10):
np.random.seed(seed)
obs_values, ground_truth, obs_times = [], [], []
for _ in range(args.n):
key_values = np.random.randn(ref_points)
key_points = np.linspace(0, 1, ref_points)
query_points = np.sort(np.random.choice(
np.linspace(0, 1., 101), size=args.length, replace=True))
weights = np.exp(-alpha*(np.expand_dims(query_points,
1) - np.expand_dims(key_points, 0))**2)
from pixell import enmap, curvedsky, fft as enfft, sharp, wcsutils
from enlib import array_ops, bench
from soapack import interfaces as sints
from optweight import alm_c_utils
import numpy as np
from scipy.interpolate import interp1d, RectBivariateSpline
from scipy import ndimage
import numba
import healpy as hp
from astropy.io import fits
import yaml
import pkgutil
from concurrent import futures
import multiprocessing
import os
import hashlib
# Utility functions to support tiling classes and functions. Just keeping code organized so I don't get overwhelmed.
# copied from soapack.interfaces
def config_from_yaml_file(filename):
"""Returns a dictionary from a yaml file given by absolute filename.
"""
with open(filename) as f:
config = yaml.safe_load(f)
return config
def config_from_yaml_resource(resource):
"""Returns a dictionary from a yaml file given by the resource name (relative to tacos package).
"""
f = pkgutil.get_data('mnms', resource).decode()
config = yaml.safe_load(f)
return config
def get_default_data_model():
"""Returns a soapack.interfaces.DataModel instance depending on the
name of the data model specified in the users's soapack config
'mnms' block, under key 'default_data_model'.
Returns
-------
soapack.interfaces.DataModel instance
An object used for loading raw data from disk.
"""
config = sints.dconfig['mnms']
dm_name = config['default_data_model']
# capitalize all chars except v3
dm_name = ''.join(
[dm_name[i].upper() if dm_name[i] != 'v' else 'v' for i in range(len(dm_name))]
)
return getattr(sints, dm_name)()
def get_default_mask_version():
"""Returns the mask version (string) depending on what is specified
in the user's soapack config. If the 'mnms' block has a key
'default_mask_version', return the value of that key. If not, return
the 'default_mask_version' from the user's default data model block.
Returns
-------
str
A default mask version string to help find an analysis mask. With
no arguments, a NoiseModel constructor will use this mask version
to seek the analysis mask in directory mask_path/mask_version.
"""
config = sints.dconfig['mnms']
try:
mask_version = config['default_mask_version']
except KeyError:
dm_name = config['default_data_model'].lower()
config = sints.dconfig[dm_name]
mask_version = config['default_mask_version']
return mask_version
def get_nsplits_by_qid(qid, data_model):
"""Get the number of splits in the raw data corresponding to this array 'qid'"""
return int(data_model.adf[data_model.adf['#qid']==qid]['nsplits'])
def slice_geometry_by_pixbox(ishape, iwcs, pixbox):
pb = np.asarray(pixbox)
return enmap.slice_geometry(
ishape[-2:], iwcs, (slice(*pb[:, -2]), slice(*pb[:, -1])), nowrap=True
)
def slice_geometry_by_geometry(ishape, iwcs, oshape, owcs):
pb = enmap.pixbox_of(iwcs, oshape, owcs)
return enmap.slice_geometry(
ishape[-2:], iwcs, (slice(*pb[:, -2]), slice(*pb[:, -1])), nowrap=True
)
# users must be careful with indices shape. it gets promoted to 2D array by
# prepending a dimension if necessary. afterward, indices[i] (ie, indexing along
# axis=0 of 2D indices array) corresponds to axis[i]
def get_take_indexing_obj(arr, indices, axis=None):
if axis is not None:
shape = arr.shape
slicelist = [slice(dim) for dim in shape]
axis = np.atleast_1d(axis)
indices = atleast_nd(indices, 2)
for i in range(len(axis)):
slicelist[axis[i]] = tuple(indices[i])
return tuple(slicelist)
else:
return np.array(indices)
# this is like enmap.partial_flatten except you give the axes you *want* to flatten
def flatten_axis(imap, axis=None, pos=0):
if axis is None:
return imap.reshape(-1)
else:
imap = np.moveaxis(imap, axis, range(len(axis))) # put the axes to be flattened in front
imap = imap.reshape((-1,) + imap.shape[len(axis):]) # flatten the front dimensions
imap = np.moveaxis(imap, 0, pos) # put front dimension in pos
return imap
# this is like enmap.partial_expand except you give the axes you *want* to restore
# shape is the shape of the restored axes or the full original array with restored axes
def unflatten_axis(imap, shape, axis=None, pos=0):
if axis is None:
return imap
else:
imap = np.moveaxis(imap, pos, 0) # put pos in front dimension
shape = np.atleast_1d(shape)
axis = np.atleast_1d(axis)
if len(shape) == len(axis):
shape = tuple(shape)
elif len(shape) == len(axis) + len(imap.shape[1:]):
shape = tuple(shape[axis])
else:
raise ValueError('Shape arg must either be the same length as axis or the restored array shape')
imap = imap.reshape(shape + imap.shape[1:]) # expand the front dimension into restored shape
imap = np.moveaxis(imap, range(len(axis)), axis) # put the front restored axes into correct positions
return imap
def atleast_nd(arr, n, axis=None):
arr = np.asanyarray(arr)
if (axis is None) or (arr.ndim >= n):
oaxis = tuple(range(n - arr.ndim)) # prepend the missing dims, or do nothing if arr.ndim >= n
else:
axis = np.atleast_1d(axis)
assert (n - arr.ndim) >= len(axis), 'More axes than dimensions to add'
oaxis = tuple(range(n - arr.ndim - len(axis))) + tuple(axis) # prepend the extra dims
return np.expand_dims(arr, oaxis)
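# Quick examples: promote a (3, 5) array to higher dimensions, placing new
# axes where requested (leading axes are prepended by default).
#     assert atleast_nd(np.zeros((3, 5)), 4).shape == (1, 1, 3, 5)
#     assert atleast_nd(np.zeros((3, 5)), 3, axis=1).shape == (3, 1, 5)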
def triu_indices(N):
"""Gives the upper triangular indices of an NxN matrix in diagonal-major order
(compatible with healpy spectra ordering with new=True)
Parameters
----------
N : int
Side-length of matrix
Returns
-------
tuple of ndarray
A row array and column array of ints, such that if arr is NxN, then
arr[triu_indices(N)] gives the 1D elements of the upper triangle of arr
in diagonal-major order.
"""
num_idxs = N*(N+1)//2
rows = []
cols = []
rowoffset = 0
coloffset = 0
for i in range(num_idxs):
rownum = i - rowoffset
colnum = rownum + coloffset
rows.append(rownum)
cols.append(colnum)
if colnum >= N-1: # N-1 is last column
rowoffset = i+1 # restart rows next iteration
coloffset += 1 # cols start one higher
return np.array(rows), np.array(cols)
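# Worked example for N=3: indices come out diagonal-major, i.e. the main
# diagonal first, then each superdiagonal in turn.
#     rows, cols = triu_indices(3)
#     # rows -> [0, 1, 2, 0, 1, 0], cols -> [0, 1, 2, 1, 2, 2]
#     # so arr[rows, cols] = [a00, a11, a22, a01, a12, a02]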
def triu_indices_1d(N):
idxs = np.arange(N**2).reshape(N, N)
return idxs[triu_indices(N)]
def is_triangular(i):
return np.roots((1, 1, -2*i))[1].is_integer()
def triangular(N):
return N*(N+1)//2
def triangular_idx(i):
assert is_triangular(i), 'Arg must be a triangular number'
return np.roots((1, 1, -2*i)).astype(int)[1]
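# Sanity check for the triangular-number helpers: triangular(4) == 10, and
# triangular_idx recovers 4 as the positive root of x**2 + x - 20 = 0.
#     assert triangular(4) == 10
#     assert is_triangular(10) and triangular_idx(10) == 4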
def triu_pos(i, N):
return np.stack(triu_indices(N))[:, i]
def triu_to_symm(arr, copy=False, axis1=0, axis2=1):
assert arr.shape[axis1] == arr.shape[axis2], 'Axes of matrix at axis1, axis2 must have the same size'
if copy:
arr = arr.copy()
ncomp = arr.shape[axis1]
for i in range(ncomp):
for j in range(i+1, ncomp): # i+1 to only do upper triangle, not diagonal
getslice = get_take_indexing_obj(arr, [[i], [j]], axis=(axis1, axis2))
setslice = get_take_indexing_obj(arr, [[j], [i]], axis=(axis1, axis2))
arr[setslice] = arr[getslice]
return arr
# we can collapse a symmetric 2D matrix whose first axis is passed as the arg
# into a 1D list of the upper triangular components.
# this will clobber if axis1 or axis2 is in map axes, see __array_wrap__
def to_flat_triu(arr, axis1=0, axis2=None, flat_triu_axis=None):
# force axes to be positive
axis1 = axis1%arr.ndim
if axis2 is None:
axis2 = axis1+1
axis2 = axis2%arr.ndim
assert axis1 < axis2
assert arr.shape[axis1] == arr.shape[axis2], 'Axes of matrix at axis1, axis2 must have the same size'
ncomp = arr.shape[axis1]
# put flattened dims at flat_triu_axis position
if flat_triu_axis is None:
flat_triu_axis = axis1
arr = flatten_axis(arr, axis=(axis1, axis2), pos=flat_triu_axis)
# get triangular indices for axis to be expanded
slice_obj = get_take_indexing_obj(arr, triu_indices_1d(ncomp), axis=flat_triu_axis)
return arr[slice_obj]
# this clobbers ndarray subclasses because of call to append!
# axis2 is position in expanded array, while flat_triu_axis is position in passed array
def from_flat_triu(arr, axis1=0, axis2=None, flat_triu_axis=None):
if flat_triu_axis is None:
flat_triu_axis = axis1
# force axes to be positive
axis1 = axis1%arr.ndim
if axis2 is None:
axis2 = axis1+1
axis2 = axis2%(arr.ndim+1)
assert axis1 < axis2
assert is_triangular(arr.shape[flat_triu_axis]), 'flat_triu_axis length must be a triangular number'
# place in new array with ncomp**2 instead of n_triu at flat_triu_axis
ncomp = triangular_idx(arr.shape[flat_triu_axis])
ishape = list(arr.shape)
ishape[flat_triu_axis] = ncomp**2 - triangular(ncomp) # this will be concatenated to arr
arr = np.append(arr, np.zeros(ishape), axis=flat_triu_axis).astype(arr.dtype)
# get triangular indices for axis to be expanded
getslice = get_take_indexing_obj(arr, np.arange(triangular(ncomp)), axis=flat_triu_axis)
setslice = get_take_indexing_obj(arr, triu_indices_1d(ncomp), axis=flat_triu_axis)
arr[setslice] = arr[getslice]
# reshape into correct shape
arr = unflatten_axis(arr, (ncomp, ncomp), axis=(axis1, axis2), pos=flat_triu_axis)
return triu_to_symm(arr, axis1=axis1, axis2=axis2)
# get a logical mask that can be used to index an array, eg to build a mask
# out of conditions.
# if not keep_prepend_dims, perform op over all dims up to map dims; else
# perform over specified axes
def get_logical_mask(cond, op=np.logical_or, keep_prepend_dims=False, axis=0):
if not keep_prepend_dims:
axis = tuple(range(cond.ndim - 2))
return op.reduce(cond, axis=axis)
def get_coadd_map(imap, ivar):
"""Return sum(imap[i]*ivar[i]) / sum(ivar[i]), where
each sum is over splits.
Parameters
----------
imap : (..., nsplit, 3, ny, nx) ndmap
Data maps for N splits.
ivar : (..., nsplit, 1, ny, nx) ndmap
Inverse variance maps for N splits.
Returns
-------
coadd : (..., 1, 3, ny, nx) ndmap
Inverse variance weighted coadd over splits, with
singleton dimension in split axis.
"""
if hasattr(imap, 'wcs'):
is_enmap = True
wcs = imap.wcs
else:
is_enmap = False
imap = atleast_nd(imap, 4) # make 4d by prepending
ivar = atleast_nd(ivar, 4)
# due to floating point precision, the coadd is not exactly the same
# as a split where that split is the only non-zero ivar in that pixel
num = np.sum(imap * ivar, axis=-4, keepdims=True)
den = np.sum(ivar, axis=-4, keepdims=True)
mask = den != 0
coadd = np.zeros_like(num)
np.divide(num, den, where=mask, out=coadd)
# find pixels where exactly one split has a nonzero ivar
single_nonzero_ivar_mask = np.sum(ivar!=0, axis=-4, keepdims=True) == 1
# set the coadd in those pixels to be equal to the imap value of that split (ie, avoid floating
# point errors in naive coadd calculation)
single_nonzero_fill = np.sum(imap * (ivar!=0), axis=-4, where=single_nonzero_ivar_mask, keepdims=True)
coadd = np.where(single_nonzero_ivar_mask, single_nonzero_fill, coadd)
if is_enmap:
coadd = enmap.ndmap(coadd, wcs)
return coadd
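# Tiny numeric sketch (shapes reduced to 1x1 pixels, one component): two
# splits with values 1 and 3 and ivars 1 and 3 coadd to (1*1 + 3*3)/(1 + 3) = 2.5.
#     imap = np.array([1., 3.]).reshape(2, 1, 1, 1)
#     ivar = np.array([1., 3.]).reshape(2, 1, 1, 1)
#     assert get_coadd_map(imap, ivar).ravel()[0] == 2.5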
def get_ivar_eff(ivar, use_inf=False):
"""
Return ivar_eff = 1 / (1 / ivar - 1 / sum_ivar),
where sum_ivar is the sum over splits.
Parameters
----------
ivar : (..., nsplit, 1, ny, nx) ndmap
Inverse variance maps for N splits.
use_inf : bool, optional
If set, use np.inf for values that approach infinity,
instead of large numerical values.
Returns
-------
ivar_eff : (..., nsplit, 1, ny, nx) enmap
Ivar_eff for each split.
"""
# Make 4d by prepending splits along -4 axis.
ivar = atleast_nd(ivar, 4)
# We want to calculate 1 / (1/ivar - 1/sum(ivar)). It is easier to do
# ivar * sum(ivar) / (sum(ivar) - ivar) to avoid (some) divisions by zero.
sum_ivar = np.sum(ivar, axis=-4, keepdims=True)
num = sum_ivar * ivar # Numerator.
den = sum_ivar - ivar # Denominator.
# In pixels where ivar == sum_ivar we would get inf.
mask = den != 0
out = np.divide(num, den, where=mask, out=num)
if use_inf:
out[~mask] = np.inf
else:
# Fill with largest value allowed by dtype to mimic np.nan_to_num.
out[~mask] = np.finfo(out.dtype).max
return out
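# Worked example: with two splits of equal ivar = 1, sum_ivar = 2, so
# ivar_eff = 1 / (1/1 - 1/2) = 2 for each split.
#     assert np.allclose(get_ivar_eff(np.ones((2, 1, 1, 1))), 2.)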
def get_corr_fact(ivar):
'''
Get correction factor sqrt(ivar_eff / ivar) that converts a draw from
split difference d_i to a draw from split noise n_i.
Parameters
----------
ivar : (..., nsplit, 1, ny, nx) enmap
Inverse variance maps for N splits.
Returns
-------
corr_fact : (..., nsplit, 1, ny, nx) enmap
Correction factor for each split.
'''
corr_fact = get_ivar_eff(ivar, use_inf=True)
corr_fact[~np.isfinite(corr_fact)] = 0
np.divide(corr_fact, ivar, out=corr_fact, where=ivar!=0)
corr_fact[ivar==0] = 0
corr_fact **= 0.5
return corr_fact
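# Continuing the example above: corr_fact = sqrt(ivar_eff / ivar)
# = sqrt(2 / 1) = sqrt(2) for two equal-ivar splits.
#     assert np.allclose(get_corr_fact(np.ones((2, 1, 1, 1))), np.sqrt(2))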
def get_noise_map(imap, ivar):
return imap - get_coadd_map(imap, ivar)
def get_whitened_noise_map(imap, ivar):
return get_noise_map(imap, ivar)*np.sqrt(get_ivar_eff(ivar))
def rolling_average(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / float(N)
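# The cumulative-sum trick above computes each window sum as a difference of
# two cumsum entries, e.g. rolling_average([1, 2, 3, 4], 2) -> [1.5, 2.5, 3.5].
#     assert np.allclose(rolling_average(np.array([1, 2, 3, 4]), 2), [1.5, 2.5, 3.5])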
def linear_crossfade(cNy, cNx, npix_y, npix_x=None, dtype=np.float32):
if npix_x is None:
npix_x = npix_y
fys = np.ones(cNy, dtype=dtype)
import numpy as np
a = np.array([1, 3, 6, 9, -3, -6])
b = np.array([1, 2, 3, 40, -9, 5])
# 1.
print(np.add(a, b))
# 2.
print(np.subtract(a, b))
# 3.
print(np.multiply(a, b))
# 4.
print(np.divide(a, b))
# 5.
print(np.mod(a, b))
# 6.
print(np.remainder(a, b))
# 7.
print(np.divmod(a, b))
# 8.
print(np.absolute(a))
print(np.absolute(b))
# 9.
a = np.array([1, 3, 6, 9, 3, 6])
import numpy as np
import pytest
from snc.environments.job_generators.discrete_review_job_generator \
import DeterministicDiscreteReviewJobGenerator as drjg
from snc.environments.job_generators.discrete_review_job_generator \
import PoissonDiscreteReviewJobGenerator as prjg
from snc.environments.controlled_random_walk import ControlledRandomWalk
import snc.utils.snc_tools as snc
import snc.environments.state_initialiser as stinit
import snc.environments.examples as examples
import \
snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing
from snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator import \
ScaledBernoulliServicesPoissonArrivalsGenerator
from snc.environments.state_initialiser import DeterministicCRWStateInitialiser
def test_is_binary():
c = np.ones((1, 1))
assert (snc.is_binary(c))
c = np.zeros((1, 1))
assert (snc.is_binary(c))
c = np.ones((5, 4))
assert (snc.is_binary(c))
c = np.zeros((5, 4))
assert (snc.is_binary(c))
c = np.random.randint(0, 1, (3, 6))
assert (snc.is_binary(c))
c = []
assert (not snc.is_binary(c))
c = np.random.random_sample((3, 5))
c[0] = 1
assert (not snc.is_binary(c))
def test_index_phys_resources_with_negative_values():
index_phys_resources = (-1, 0)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
with pytest.raises(AssertionError):
ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
def test_index_phys_resources_with_index_higher_than_num_resources():
index_phys_resources = (0, 2)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
with pytest.raises(AssertionError):
ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
def test_index_phys_resources_with_repeated_indexes():
index_phys_resources = (0, 0)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
with pytest.raises(AssertionError):
ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
def test_valid_index_phys_resources_1():
index_phys_resources = (0,)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
assert env.index_phys_resources == index_phys_resources
def test_valid_index_phys_resources_1_2():
index_phys_resources = (0, 1)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
assert env.index_phys_resources == index_phys_resources
def test_state_initialiser_uniform():
num_buffers = 5
capacity = 10
s0 = stinit.UniformRandomCRWStateInitialiser(num_buffers, capacity)
init_state = np.zeros((num_buffers, 1))
num_samples = 100000
for i in range(num_samples):
init_state += s0.get_initial_state()
init_state /= num_samples
assert np.all(np.isclose(init_state, ((capacity - 1) / 2) * np.ones((num_buffers, 1)), atol=0.05)) # loose tolerance for the Monte Carlo mean
def test_state_initialiser_deterministic():
initial_state = np.array([2, 3, 4, 5])[:, None]
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
assert np.all(s0.get_initial_state() == initial_state)
def test_unfeasible_action():
"""Check that the step method doesn't allow actions that violate the one action per resource
constraint: C u <= 1."""
np.random.seed(42)
env = examples.double_reentrant_line_only_shared_resources_model(alpha=0)
env.reset_with_random_state(42)
action = np.array([[1], [0], [0], [1]])
import cv2
import matplotlib.pyplot as plt
import numpy as np
kernel = np.ones((5, 5), np.uint8)
import numpy as np
from typing import *
from numpy.typing import ArrayLike
from scipy.spatial import Delaunay
from tallem.utility import ask_package_install, package_exists
def flywing():
''' Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis) '''
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])
return([arr1, arr2])
def gaussian_blob(n_pixels: int, r: float):
'''
Generates a closure which, given a 2D location *mu=(x,y)*, generates a white blob
with [normalized] radius 0 < r <= 1 in a (n_pixels x n_pixels) image.
If *mu* is in [0,1] x [0,1], the center of the white blob should be visible
If *mu* has as both of its coordinates outside of [0,1]x[0,1], the blob may be partially visible
If *mu* has both of its coordinates outside of [-r, 1+r]x[-r, 1+r], then image should be essentially black
The returned closure uses autograd's numpy wrapper throughout, so the resulting
function can be differentiated (w.r.t *mu*) using the reverse-mode differentiation that *autograd* provides.
This function also returns the global normalizing constant needed to normalize the pixel intensities in [0,1],
for plotting or other purposes.
Return: (blob, c) where
- blob := differentiable closure which, given a vector (x,y), generates the blob image a flat vector.
- c := maximum value of the intensity of any given pixel for any choice of *mu*.
'''
import autograd.numpy as auto_np
sd = r/3.090232 # 3.090232 is approximately the 99.9th-percentile z-score, so the blob effectively vanishes beyond radius r
sigma = sd**2
sigma_inv = 1.0/sigma
denom = np.sqrt(((2*auto_np.pi)**2) * (sigma**2))
def blob(mu): # mu can be anywhere; center of image is [0.5, 0.5]
loc = auto_np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels)
x,y = auto_np.meshgrid(loc, loc)
grid = auto_np.exp(-0.5*(sigma_inv * ((x-mu[0])**2 + (y-mu[1])**2)))/denom
return(auto_np.ravel(grid).flatten())
return(blob, auto_np.exp(0)/denom)
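# Usage sketch (illustrative; sizes are assumptions, not from the original code):
#     blob, c = gaussian_blob(n_pixels=17, r=0.25)
#     img = blob(auto_np.array([0.5, 0.5]))   # flat (17*17,) vector
#     # from autograd import jacobian
#     # J = jacobian(blob)(auto_np.array([0.5, 0.5]))   # shape (289, 2), d(pixels)/d(mu)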
def plot_image(P, figsize=(8,8), max_val = "default"):
if max_val == "default": max_val = np.max(P)
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
plt.imshow(P, cmap='gray', vmin=0, vmax=max_val)
fig.gca().axes.get_xaxis().set_visible(False)
fig.gca().axes.get_yaxis().set_visible(False)
def plot_images(P, shape, max_val = "default", figsize=(8,8), layout = None):
'''
P := numpy array where each row is a grayscale image
shape := the shape to reshape each row of P prior to plotting
'''
import matplotlib.pyplot as plt
if max_val == "default":
max_val = np.max(P)
if P.ndim == 1:
fig = plt.figure(figsize=figsize)
plt.imshow(P.reshape(shape), cmap='gray', vmin=0, vmax=max_val)
fig.gca().axes.get_xaxis().set_visible(False)
fig.gca().axes.get_yaxis().set_visible(False)
return(fig, fig.gca()) # this branch never created an explicit Axes handle
else:
assert layout is not None, "missing layout"
fig, axs = plt.subplots(*layout, figsize=figsize)
axs = axs.flatten()
for i, (img, ax) in enumerate(zip(P, axs)):
#fig.add_subplot(layout[0], layout[1], i+1)
plt.axis("off")
ax.imshow(P[i,:].reshape(shape), cmap='gray', vmin=0, vmax=max_val, aspect='auto')
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.1)
return(fig, axs)
def scatter2D(P, layout = None, figsize=(8,8), **kwargs):
import matplotlib.pyplot as plt
if isinstance(P, np.ndarray):
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
ax.scatter(*P.T, **kwargs)
return(fig, ax)
elif isinstance(P, Iterable):
assert layout is not None, "missing layout"
assert len(P) == np.prod(layout)
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
else:
fig = plt.figure(figsize=figsize)
for i, p in enumerate(P):
ax = fig.add_subplot(layout[0], layout[1], i+1)
ax.scatter(*p.T, **kwargs)
return(fig, ax)
def scatter3D(P, angles = None, layout = None, figsize=(8,8), **kwargs):
import matplotlib.pyplot as plt
if isinstance(P, np.ndarray):
import numbers
if angles is not None:
if isinstance(angles, numbers.Integral):
angles = np.linspace(0, 360, angles, endpoint=False)
assert len(angles) == np.prod(layout)
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig, ax = plt.subplots(*layout, figsize=figsize)
for i, theta in enumerate(angles):
ax = fig.add_subplot(layout[0], layout[1], i+1, projection='3d')
ax.scatter3D(*P.T, **kwargs)
ax.view_init(30, theta)
else:
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection='3d')
ax.scatter3D(*P.T, **kwargs)
elif isinstance(P, Iterable):
import numbers
assert layout is not None, "missing layout"
if angles is None:
angles = np.repeat(60, len(P))
elif isinstance(angles, numbers.Integral):
angles = np.linspace(0, 360, len(P), endpoint=False) # degrees, as view_init expects
assert len(angles) == np.prod(layout)
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig, ax = plt.subplots(*layout, figsize=figsize)
for i, p in enumerate(P):
ax = fig.add_subplot(layout[0], layout[1], i+1, projection='3d')
ax.scatter3D(*p.T, **kwargs)
ax.view_init(30, angles[i])
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
return(fig, ax)
def rotating_disk(n_pixels: int, r: float, sigma: float = 1.0):
from scipy.ndimage import gaussian_filter
import numpy as np
I = np.zeros(shape=(n_pixels, n_pixels))
p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space
z = np.array([r, 0.0]).reshape((2,1))
d = np.array([0.5, 0.5]).reshape((2,1))
x,y = np.meshgrid(p, p)
def disk_image(theta: float):
R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
c = (R @ z) + d # center of disk in [0,1]^2
D = np.flipud(np.sqrt((x - c[0])**2 + (y - c[1])**2))
D[D <= r] = -1.0
D[D > r] = 0.0
D[D == -1.0] = 1.0
return(np.ravel(gaussian_filter(D, sigma=sigma)).flatten()) # honor the sigma argument
return(disk_image, 1.0)
# D = np.zeros(np.prod(x.shape))
# for i, (xi,yi) in enumerate(zip(x.flatten(),y.flatten())):
# p = np.array([xi,yi])
# D[i] = np.dot(p-b, u)# np.linalg.norm(z-v)
def white_bars(n_pixels: int, r: float, sigma: float = 1.0):
'''
Returns a parameterization that yields a white vertical bar at various orientations in an image.
Fixed parameters:
n_pixels := number of pixels to make square image
r := constant between [0,1] indicating how wide to make the bar
sigma := kernel parameter for gaussian blur
Returns:
bar := closure w/ parameters y_offset in [0, 1] and theta in [0, pi]
c := normalizing constant for plotting
'''
from scipy.ndimage import gaussian_filter
import numpy as np
w = r
p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space
x,y = np.meshgrid(p,p)
c = np.array([0.5, 0.5]) # center of image
def bar(theta: float, d: float):
assert np.all(np.bitwise_and(d >= -1.0, d <= 1.0)), "d must be in the range [-1, 1]"
assert np.all(np.bitwise_and(theta >= 0.0, theta <= np.pi)), "theta must be in the range [0, pi]"
u = np.array([ 1.0, np.tan(theta) ])
u = u / np.linalg.norm(u)
c = np.array([0.5, 0.5]) # center of image
d = d * (np.sqrt(2) / 2) # scale where to place center of bar
if theta > np.pi/2:
d = -d
b = c + d*u # center of bar
D = np.zeros(np.prod(x.shape))
for i, (xi,yi) in enumerate(zip(x.flatten(),y.flatten())):
p = np.array([xi,yi])
D[i] = np.dot(p-b, u)# np.linalg.norm(z-v)
I = abs(D.reshape((n_pixels, n_pixels))).T
I[I > w] = 1
I = 1.0 - I
return(gaussian_filter(I, sigma=sigma))
c = np.max(bar(0.0, 0.0))
return(bar, c)
# u = np.array([ 1.0, np.tan(theta) ])
# u = u / np.linalg.norm(u)
# d = np.array([-di if ti <= np.pi/2 else di for di,ti in zip(d, theta)])*(np.sqrt(2) / 2)
# U = np.c_[np.repeat(1.0, len(theta)), theta]
# U = U / np.linalg.norm(U, axis = 1, keepdims = True)
# B = c + d.reshape((len(d), 1)) * U # center of bars
# D = [abs((x - b[0])*u[0] + (y - b[1])*u[1]).T for (u, b) in zip(U, B)]
# # b = c + d*u # center of bar
# # D = (x - b[0])*u[0] + (y - b[1])*u[1]
# # I = abs(D.reshape((n_pixels, n_pixels))).T
# images = np.zeros((B.shape[0], n_pixels**2))
# for i, img in enumerate(D):
# img[img > w] = 1
# img = 1.0 - img
# images[i,:] = np.ravel(gaussian_filter(img, sigma=sigma).flatten())
# return(images)
# from scipy.ndimage import gaussian_filter
# import numpy as np
# w = r*np.sqrt(2)
# p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space
# x,y = np.meshgrid(p,p)
# def bar(y_offset: float, theta: float):
# assert y_offset >= 0.0 and y_offset <= 1.0
# assert theta >= 0.0 and theta <= np.pi
# # z = np.array([0.5, y_offset]) # intercept
# # dist_to_line = np.cos(theta)*(z[1] - y) - np.sin(theta)*(z[0]-x)
# # dist_to_line = ((y - y_offset)/np.tan(theta))*np.sin(theta)
# # Z = np.array([np.array([xi,yi]) for xi,yi in zip(x.flatten(),y.flatten())])
# # fig,ax = scatter2D(Z, c="blue")
# # fig,ax = scatter2D(np.array(P), c="red", fig=fig, ax=ax)
# # fig,ax = scatter2D(np.array([0.5, 0.5]), c="green", fig=fig, ax=ax)
# # fig,ax = scatter2D(np.c_[x.flatten(), x.flatten()*m + b], c="purple", fig=fig, ax=ax)
# m, b = np.tan(theta), y_offset
# #pt = np.array([1.0, m + b])
# z1 = np.array([0.50, b])
# z2 = np.array([1.0, 1.0*m + b])
# pt = z2 - z1
# d = []
# P = []
# for xi,yi in zip(x.flatten(),y.flatten()):
# u = pt / np.linalg.norm(pt)
# v = np.array([xi,yi])
# z = u*np.dot(v-np.array([0.5, b]), u)+np.array([0.5, b])
# d.append(np.linalg.norm(z-v))
# P.append(z)
# dist_to_line = np.array(d)
# # fig, ax = scatter2D(np.array(P))
# I = abs(dist_to_line.reshape((n_pixels, n_pixels)))
# I = np.flipud(I) # make origin lower-left, not top-left
# # I = (np.sqrt(2)/2)*(I/np.max(I))
# I[I > w] = np.sqrt(2)
# I = np.sqrt(2) - I ## invert intensity
# # I[I < (np.sqrt(2) - w)] = 0.0
# # B = I.copy()
# # I[I <= w] = -1.0
# # I[I > w] = 0.0
# # I[I == -1.0] = np.max(B[B <= w]) - B[B <= w] # 1.0
# return(gaussian_filter(I, sigma=sigma))
# c = np.max(bar(0.0, 0.0))
# return(bar, c)
# def _gaussian_pixel(d, n_pixels):
# from scipy.stats import norm
# sigma = d/3.0
# Sigma = auto_np.diag([sigma, sigma])
# sigma_inv = auto_np.linalg.inv(Sigma)[0,0]
# denom = np.sqrt(((2*np.pi)**2) * auto_np.linalg.det(Sigma))
# normal_constant = norm.pdf(0, loc=0, scale=sigma)
# def blob(mu): # generates blob at location mu
# # mu = mu.reshape((2, 1))
# # np.exp(-0.5 * ((x - mu).T @ SigmaI @ (x - mu))).flatten()
# #x, y = auto_np.meshgrid(auto_np.arange(n_pixels), auto_np.arange(n_pixels))
# loc = auto_np.linspace(0, 1, n_pixels, False) + (1/(2*n_pixels))
# x,y = auto_np.meshgrid(loc, loc)
# grid = auto_np.exp(-0.5*(sigma_inv * ((x-mu[0])**2 + (y-mu[1])**2)))/denom
# #grid = auto_np.exp(-0.5*((x - mu[0])**2 + (y - mu[1])**2))/denom
# #return(auto_np.ravel(grid).flatten())
# return(grid/normal_constant)
# return(blob)
# plot_image(gaussian_pixel2(1/32, 11)([-0.5, 0.5]))
def white_dot(n_pixels: int, r: float, n: Optional[int], method: Optional[str] = "grid", mu: Optional[ArrayLike] = None):
'''
Generates a grayscale image data set where white blobs are placed on a (n_pixels x n_pixels) grid
using a multivariate normal density whose standard deviation sigma (in both directions) is sigma=d/3.
If 'n' is specified, then 'n' samples are generated from a larger space s([-d, 1+d]^2) where s(*)
denotes the scaling of the interval [-d,1+d] by 'n_pixels'.
Parameters:
n_pixels := number of pixels wide/tall to make the resulting images
r := relative radius of dot (in (0, 1])
n := (optional) number of samples desired
method := (optional) how to generate samples in the parameter space. Can be either "grid" or "random".
mu := (optional) locations of dot centers to generate the dots at
Returns:
samples := generated image samples
params := (x,y,i) parameters associated with each sample,
f := closure for generating more samples. See gaussian_blob() for more details.
c := normalizing constant. See gaussian_blob() for more details.
'''
assert r > 0 and r <= 1.0, "r must be in the range 0 < r <= 1.0"
assert (mu is not None) or isinstance(n, (int, tuple)), "n must be an integer or a tuple of integers when mu is not given"
ask_package_install("autograd")
import numpy as np
import autograd.numpy as auto_np
## First generate the closure to make the images
blob, c = gaussian_blob(n_pixels, r)
if not(mu is None):
samples = np.vstack([blob(auto_np.array([x,y])) for x,y in mu])
params = mu
elif method == "random":
## Generate uniformly random locations (in domain)
assert n is not None, "'n' must be supplied if 'mu' is not."
n1, n2 = (n, n) if isinstance(n, int) else (n[0], n[1])
samples, params = [], []
X, Y = np.random.uniform(size=n1,low=-r,high=1+r), np.random.uniform(size=n1,low=-r,high=1+r)
for x,y in zip(X, Y):
samples.append(blob(auto_np.array([x,y])))
params.append([x, y, 1.0])
NP = blob(auto_np.array([0.5, 0.5]))
for t in np.random.uniform(size=n2, low=0.0, high=1.0):
samples.append(t*NP)
params.append([0.5, 0.5, 1-t])
## Vertically stack
samples, params = np.vstack(samples), np.vstack(params)
elif method == "grid":
assert n is not None, "'n' must be supplied if 'mu' is not."
if isinstance(n, int):
n1, n2 = (n, n)
else:
n1, n2 = (n[0], n[1])
ng = int(np.floor(np.sqrt(n1)))
samples, params = [], []
for x in np.linspace(0.0-r,1.0+r,ng):
for y in np.linspace(0.0-r,1.0+r,ng):
samples.append(blob(auto_np.array([x, y])))
params.append([x, y, 1.0])
## Generate the pole
NP = blob(auto_np.array([0.5, 0.5]))
for t in np.linspace(0, 1, n2):
samples.append(t*NP)
params.append([0.5, 0.5, 1-t])
## Vertically stack
samples, params = np.vstack(samples), np.vstack(params)
## Return the data
return(samples, params, blob, c)
def mobius_band(n_polar=66, n_wide=9, scale_band=0.25):
'''
Generates stratified samples on a Mobius band embedded in R^3
To get uniform samples, N = (n_polar * 2*n_wide) uniformly spaced coordinates are generated initially
from the intrinsic space of M. These points are converted to their extrinsic (3D) coordinates and
are then triangulated using a Delaunay triangulation. Finally, using the Delaunay triangles as stratum,
a stratified sampling scheme is employed by sampling randomly from each triangle using its barycentric
coordinates. This stratification ensures the samples are both sufficiently random but sufficiently "uniformly
spaced" around the band.
Returns:
- M := (n x 3) matrix of embedding coordinates
- B := (n x 2) matrix of intrinsic coordinates
In the intrinsic coordinates, B[:,0] is the width parameter and B[:,1] is the angular coordinate
'''
## Generate random (deterministic) polar coordinates around Mobius Band
np.random.seed(0)
s = np.linspace(-scale_band, scale_band, 2*n_wide) # width/radius
t = np.linspace(0, 2*np.pi, n_polar) # circular coordinate
s, t = np.meshgrid(s, t)
## Triangulate to allow stratification
M = np.c_[np.ravel(s), np.ravel(t)]
V = M[Delaunay(M).simplices]
## Sample within each strata via random barycentric coordinates
normalize = lambda x: x / np.sum(x)
Y = np.array([np.sum(v * normalize(np.random.uniform(size=(3,1))), axis = 0) for v in V])
## Convert to 3d
S, T = Y[:,0], Y[:,1]
phi = 0.5 * T
r = 1 + S * np.cos(phi)
MB = np.c_[r * np.cos(T), r * np.sin(T), S * np.sin(phi)]
## Return both 3D embedding + original parameters
return(MB, Y)
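# Usage sketch: sample the band and inspect the two coordinate systems.
#     MB, Y = mobius_band(n_polar=66, n_wide=9)
#     assert MB.shape[1] == 3 and Y.shape[1] == 2   # extrinsic vs intrinsic
#     # Y[:, 0] is the width parameter, Y[:, 1] the angular coordinate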
def embed(a: ArrayLike, D: int, method="givens"):
''' Embeds a point cloud into D dimensions using random orthogonal rotations '''
def givens(i,j,theta,n=2):
G = np.eye(n)
G[i,i] = np.cos(theta)
G[j,j] = np.cos(theta)
G[i,j] = -np.sin(theta)
G[j,i] = np.sin(theta)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Filename: tiledb.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import itertools
import astropy
import cycler
import matplotlib.pyplot as plt
import numpy
from matplotlib import animation
import lvmsurveysim.target
from lvmsurveysim.schedule.tiledb import TileDB
from lvmsurveysim.schedule.scheduler import Scheduler
from lvmsurveysim.schedule.plan import ObservingPlan
from lvmsurveysim import IFU, config
from lvmsurveysim.schedule.plan import ObservingPlan
from lvmsurveysim.utils.plot import __MOLLWEIDE_ORIGIN__, get_axes, transform_patch_mollweide, convert_to_mollweide
numpy.seterr(invalid='raise')
__all__ = ['Simulator']
class Simulator(object):
"""Simulates an observing schedule for a list of targets (tile database) following and observing plan.
Parameters
----------
tiledb : ~lvmsurveysim.schedule.tiledb.TileDB
The `~lvmsurveysim.schedule.tiledb.TileDB` instance with the table of
tiles to schedule.
observing_plan : `.ObservingPlan` or None
The `.ObservingPlan` to use (one for each observatory).
If `None`, it will be created from the ``observing_plan``
section in the configuration file. Contains dates and sun/moon data for the
duration of the survey as well as Observatory data.
ifu : ~lvmsurveysim.ifu.IFU
The `~lvmsurveysim.ifu.IFU` to use. Defaults to the one from the
configuration file. Used only for plotting the survey footprint.
Attributes
----------
tiledb : ~lvmsurveysim.schedule.tiledb.TileDB
Instance of the tile database to observe.
schedule : ~astropy.table.Table
An astropy table with the results of the scheduling. Includes
information about the JD of each observation, the target observed,
the index of the pointing in the target tiling, coordinates, etc.
"""
def __init__(self, tiledb, observing_plan=None, ifu=None):
assert isinstance(tiledb, lvmsurveysim.schedule.tiledb.TileDB), \
'tiledb must be a lvmsurveysim.schedule.tiledb.TileDB instances.'
# get rid of the special tiles, we do not need them for the simulator
tdb = tiledb.tile_table
tiledb.tile_table = tdb[numpy.where(tdb['TileID'] >= tiledb.tileid_start)[0]]
if observing_plan is None:
observing_plan = self._create_observing_plan()
assert isinstance(observing_plan, ObservingPlan), 'observing_plan is not an instance of ObservingPlan.'
self.zenith_avoidance = config['scheduler']['zenith_avoidance']
self.time_step = config['scheduler']['timestep']
self.observing_plan = observing_plan
self.tiledb = tiledb
self.targets = tiledb.targets
self.ifu = ifu or IFU.from_config()
self.schedule = None
def __repr__(self):
return (f'<Simulator (observing_plan={self.observing_plan.observatory.name}, '
f'tiles={len(self.tiledb.tile_table)})>')
def save(self, path, overwrite=False):
"""
Saves the results of the scheduling simulation to a FITS file.
"""
assert isinstance(self.schedule, astropy.table.Table), \
'cannot save empty schedule. Execute Scheduler.run() first.'
        targfile = str(self.targets.filename) if self.targets.filename is not None else 'NA'
self.schedule.meta['targfile'] = targfile
self.schedule.write(path+'.fits', format='fits', overwrite=overwrite)
@classmethod
def load(cls, path, tiledb=None, observing_plan=None):
"""Creates a new instance from a schedule file.
Parameters
----------
path : str or ~pathlib.Path
The path to the schedule file and the basename, no extension. The
routine expects to find path.fits and path.npy
tiledb : ~lvmsurveysim.schedule.tiledb.TileDB or path-like
Instance of the tile database to observe.
observing_plan : `.ObservingPlan` or None
The `.ObservingPlan` to use (one for each observatory).
"""
schedule = astropy.table.Table.read(path+'.fits')
if not isinstance(tiledb, lvmsurveysim.schedule.tiledb.TileDB):
            assert tiledb is not None and tiledb != 'NA', \
'invalid or unavailable tiledb file path.'
tiledb = TileDB.load(tiledb)
observing_plan = observing_plan or []
sim = cls(tiledb, observing_plan=observing_plan)
sim.schedule = schedule
return sim
def run(self, progress_bar=True):
"""Schedules the pointings for the whole survey defined
in the observing plan.
Parameters
----------
progress_bar : bool
If `True`, shows a progress bar.
"""
# Make self.schedule a list so that we can add rows. Later we'll make
# this an Astropy Table.
self.schedule = []
plan = self.observing_plan
# Instance of the Scheduler
scheduler = Scheduler(plan)
# observed exposure time for each pointing
        observed = numpy.zeros(len(self.tiledb.tile_table), dtype=float)
# range of dates for the survey
min_date = numpy.min(plan['JD'])
max_date = numpy.max(plan['JD'])
dates = range(min_date, max_date + 1)
if progress_bar:
generator = astropy.utils.console.ProgressBar(dates)
else:
generator = dates
for jd in generator:
if progress_bar is False:
print(f'scheduling JD={jd}.')
# Skips JDs not found in the plan or those that don't have good weather.
if jd not in plan['JD'] or plan[plan['JD'] == jd]['is_clear'][0] == 0:
continue
observed += self.schedule_one_night(jd, scheduler, observed)
# Convert schedule to Astropy Table.
self.schedule = astropy.table.Table(
rows=self.schedule,
names=['JD', 'observatory', 'target', 'group', 'tileid', 'index', 'ra', 'dec', 'pa',
'airmass', 'lunation', 'shadow_height', "moon_dist", 'lst', 'exptime', 'totaltime'],
dtype=[float, 'S10', 'S20', 'S20', int, int, float, float, float,
float, float, float, float, float, float, float])
def schedule_one_night(self, jd, scheduler, observed):
"""Schedules a single night at a single observatory.
This method is not intended to be called directly. Instead, use `.run`.
Parameters
----------
jd : int
The Julian Date to schedule. Must be included in ``plan``.
scheduler : .Scheduler
The Scheduler instance that will determine the observing sequence.
observed : ~numpy.array
An array of the length of the tiledb that records the observing time
accumulated on each tile thus far
Returns
-------
exposure_times : `~numpy.ndarray`
Array with the exposure times in seconds added to each tile during
this night.
"""
# initialize the scheduler for the night
scheduler.prepare_for_night(jd, self.observing_plan, self.tiledb)
# shortcut
tdb = self.tiledb.tile_table
# begin at twilight
current_jd = scheduler.evening_twi
# While the current time is before morning twilight ...
while current_jd < scheduler.morning_twi:
# obtain the next tile to observe
observed_idx, current_lst, hz, alt, lunation = scheduler.get_optimal_tile(current_jd, observed)
if observed_idx == -1:
# nothing available
self._record_observation(current_jd, self.observing_plan.observatory,
lst=current_lst,
exptime=self.time_step,
totaltime=self.time_step)
current_jd += (self.time_step) / 86400.0
continue
# observe it, give it one quantum of exposure
exptime = tdb['VisitExptime'].data[observed_idx]
observed[observed_idx] += exptime
# collect observation data to put in table
tileid_observed = tdb['TileID'].data[observed_idx]
target_index = tdb['TargetIndex'].data[observed_idx]
target_name = self.targets[target_index].name
groups = self.targets[target_index].groups
target_group = groups[0] if groups else 'None'
target_overhead = self.targets[target_index].overhead
# Get the index of the first value in index_to_target that matches
# the index of the target.
target_index_first = numpy.nonzero(tdb['TargetIndex'].data == target_index)[0][0]
# Get the index of the pointing within its target.
pointing_index = observed_idx - target_index_first
# Record angular distance to moon
dist_to_moon = scheduler.moon_to_pointings[observed_idx]
# Update the table with the schedule.
airmass = 1.0 / numpy.cos(numpy.radians(90.0 - alt))
self._record_observation(current_jd, self.observing_plan.observatory,
target_name=target_name,
target_group=target_group,
tileid = tileid_observed,
pointing_index=pointing_index,
ra=tdb['RA'].data[observed_idx],
dec=tdb['DEC'].data[observed_idx],
pa=tdb['PA'].data[observed_idx],
airmass=airmass,
lunation=lunation,
shadow_height= hz, #hz[valid_priority_idx[obs_tile_idx]],
dist_to_moon=dist_to_moon,
lst=current_lst,
exptime=exptime,
totaltime=exptime * target_overhead)
current_jd += exptime * target_overhead / 86400.0
return observed
def animate_survey(self, filename='lvm_survey.mp4', step=100,
observatory=None, projection='mollweide'):
"""Create an animation of the survey progress and save as an mp4 file.
Parameters
----------
filename : str
Name of the mp4 file, defaults to ``'lvm_survey.mp4'``.
step : int
Number of observations per frame of movie.
observatory : str
Either ``'LCO'`` or ``'APO'`` or `None` (plots both).
projection : str
Which projection of the sphere to use. Defaults to Mollweide.
"""
data = self.schedule[self.schedule['target'] != '-']
if observatory:
data = data[data['observatory'] == observatory]
ll = int(len(data) / step)
x,y = convert_to_mollweide(data['ra'], data['dec'])
tt = [target.name for target in self.targets]
g = numpy.array([tt.index(i) for i in data['target']], dtype=float)
t = data['JD']
fig, ax = get_axes(projection=projection)
# scat = ax.scatter(x[:1], y[:1], c=g[:1], s=1, edgecolor=None, edgecolors=None)
scat = ax.scatter(x, y, c=g % 19, s=0.05, edgecolor=None, edgecolors=None, cmap='tab20')
# fig.show()
# return
def animate(ii):
if ii % 10 == 0:
print('%.1f %% done\r' % (ii / ll * 100))
scat.set_offsets(numpy.stack((x[:ii * step], y[:ii * step]), axis=0).T)
scat.set_array(g[:ii * step])
ax.set_title(str(t[ii]))
return scat,
anim = animation.FuncAnimation(fig, animate, frames=range(1, ll), interval=1,
blit=True, repeat=False)
anim.save(filename, fps=24, extra_args=['-vcodec', 'libx264'])
def plot(self, observatory=None, projection='mollweide', tname=None, fast=False, annotate=False, edge=False):
"""Plots the observed pointings.
Parameters
----------
observatory : str
Plot only the points for that observatory. Otherwise, plots all
the pointings.
projection : str
The projection to use, either ``'mollweide'`` or ``'rectangular'``.
tname : str
Select only a particular target name to plot
fast : bool
Plot IFU sized and shaped patches if `False`. This is the default.
Allows accurate zooming and viewing. If `True`, plot scatter-plot
dots instead of IFUs, for speed sacrificing accuracy.
This is MUCH faster.
annotate : bool
Write the targets' names next to the target coordinates. Implies
``fast=True``.
edge : bool
Draw tile edges and make tiles partly transparent to better judge overlap.
Makes zoomed-out view look odd, so use default False.
Returns
-------
figure : `matplotlib.figure.Figure`
The figure with the plot.
"""
if annotate is True:
fast = True
color_cycler = cycler.cycler(bgcolor=['b', 'r', 'g', 'y', 'm', 'c', 'k'])
fig, ax = get_axes(projection=projection)
        if tname is not None:
data = self.schedule[self.schedule['target'] == tname]
else:
data = self.schedule[self.schedule['target'] != '-']
if observatory:
data = data[data['observatory'] == observatory]
if fast is True:
if projection == 'mollweide':
x,y = convert_to_mollweide(data['ra'], data['dec'])
else:
x,y = data['ra'], data['dec']
tt = [target.name for target in self.targets]
g = numpy.array([tt.index(i) for i in data['target']], dtype=float)
ax.scatter(x, y, c=g % 19, s=0.05, edgecolor=None, edgecolors=None, cmap='tab20')
if annotate is True:
_, text_indices = numpy.unique(g, return_index=True)
for i in range(len(tt)):
plt.text(x[text_indices[i]], y[text_indices[i]], tt[i], fontsize=9)
else:
for ii, sty in zip(range(len(self.targets)), itertools.cycle(color_cycler)):
target = self.targets[ii]
name = target.name
target_data = data[data['target'] == name]
if edge:
patches = [self.ifu.get_patch(scale=target.telescope.plate_scale, centre=[p['ra'], p['dec']], pa=p['pa'],
edgecolor='k', linewidth=1, alpha=0.5, facecolor=sty['bgcolor'])[0]
for p in target_data]
else:
patches = [self.ifu.get_patch(scale=target.telescope.plate_scale, centre=[p['ra'], p['dec']], pa=p['pa'],
edgecolor='None', linewidth=0.0, facecolor=sty['bgcolor'])[0]
for p in target_data]
if projection == 'mollweide':
patches = [transform_patch_mollweide(patch) for patch in patches]
for patch in patches:
ax.add_patch(patch)
        if observatory is not None:
ax.set_title(f'Observatory: {observatory}')
return fig
def _create_observing_plan(self):
"""Returns an `.ObservingPlan` from the configuration file."""
observatory = config['observing_plan']
obs_data = config['observing_plan'][observatory]
start_date = obs_data['start_date']
end_date = obs_data['end_date']
return ObservingPlan(start_date, end_date, observatory=observatory)
def _record_observation(self, jd, observatory, target_name='-', target_group='-',
tileid=-1, pointing_index=-1, ra=-999., dec=-999., pa=-999.,
airmass=-999., lunation=-999., shadow_height=-999., dist_to_moon=-999.,
lst=-999.,
exptime=0., totaltime=0.):
"""Adds a row to the schedule."""
self.schedule.append((jd, observatory, target_name, target_group, tileid, pointing_index,
ra, dec, pa, airmass, lunation, shadow_height, dist_to_moon, lst, exptime,
totaltime))
def get_target_time(self, tname, group=False, observatory=None, lunation=None,
return_lst=False):
"""Returns the JDs or LSTs for a target at an observatory.
Parameters
----------
tname : str
The name of the target or group. Use ``'-'`` for unused time.
group : bool
            If `True`, ``tname`` is the name of a group rather than a single
            target.
observatory : str
The observatory to filter for.
lunation : list
Restrict the data to a range in lunation. Defaults to returning
all lunations. Set to ``[lmin, lmax]`` to return values of
``lmin < lunation <= lmax``.
return_lst : bool
If `True`, returns an array with the LSTs of all the unobserved
times.
Returns
-------
table : `~numpy.ndarray`
An array containing the times the target is observed at an
observatory, as JDs. If ``return_lst=True`` returns an array of
the corresponding LSTs.
"""
column = 'group' if group is True else 'target'
t = self.schedule[self.schedule[column] == tname]
if observatory:
t = t[t['observatory'] == observatory]
        if lunation is not None:
t = t[(t['lunation'] > lunation[0]) * (t['lunation'] <= lunation[1])]
if return_lst:
return t['lst'].data
else:
return t['JD'].data
def print_statistics(self, out_file=None, out_format="ascii", overwrite_out=True, observatory=None, targets=None, return_table=False):
"""Prints a summary of observations at a given observatory.
Parameters
----------
observatory : str
The observatory to filter for.
targets : `~lvmsurveysim.target.TargetList`
The targets to summarize. If `None`, use ``self.targets``.
return_table : bool
If `True`, return a `~astropy.table.Table` with the results.
out_file : str
Outfile to write statistics.
out_format : str
Outfile format consistent with astropy.table dumps
"""
if targets is None:
targets = self.targets
names = [t.name for t in targets]
time_on_target = {} # time spent exposing target
exptime_on_target = {} # total time (exp + overhead) on target
tile_area = {} # area of a single tile
target_ntiles = {} # number of tiles in a target tiling
target_ntiles_observed = {} # number of observed tiles
target_nvisits = {} # number of visits for each tile
surveytime = 0.0 # total time of survey
names.append('-') # deals with unused time
        for i, tname in enumerate(names):
if (tname != '-'):
target = self.targets[i]
tile_area[tname] = target.get_pixarea(ifu=self.ifu)
target_ntiles[tname] = len(numpy.where(self.tiledb.tile_table['TargetIndex'] == i)[0])
target_nvisits[tname] = float(target.n_exposures / target.min_exposures)
else:
tile_area[tname] = -999
target_ntiles[tname] = -999
target_nvisits[tname] = 1
tdata = self.schedule[self.schedule['target'] == tname]
if observatory:
tdata = tdata[tdata['observatory'] == observatory]
exptime_on_target[tname] = numpy.sum(tdata['exptime'].data)
target_ntiles_observed[tname] = len(tdata) / target_nvisits[tname]
target_total_time = numpy.sum(tdata['totaltime'].data)
time_on_target[tname] = target_total_time
surveytime += target_total_time
# targets that completely overlap with others have no tiles
for t in self.targets:
if target_ntiles[t.name] == 0:
print(t.name + ' has no tiles')
target_ntiles[t.name] = 1
rows = [
(t if t != '-' else 'unused',
             float(target_ntiles[t]),
numpy.around(target_ntiles_observed[t], decimals=2),
numpy.around(time_on_target[t] / 3600.0, decimals=2),
numpy.around(exptime_on_target[t] / 3600.0, decimals=2),
numpy.around(time_on_target[t] / surveytime, decimals=2),
numpy.around(target_ntiles_observed[t] * tile_area[t],
decimals=2) if t != '-' else -999,
numpy.around(float(target_ntiles_observed[t]) / float(target_ntiles[t]),
decimals=2) if t != '-' else -999)
for t in names]
stats = astropy.table.Table(rows=rows,
names=['Target', 'tiles', 'tiles_obs', 'tottime/h',
'exptime/h', 'timefrac', 'area', 'areafrac'],
dtype=('S8', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4'))
        print('%s :' % (observatory if observatory is not None else 'APO+LCO'))
stats.pprint(max_lines=-1, max_width=-1)
        if out_file is not None:
stats.write(out_file, format=out_format, overwrite=overwrite_out)
if return_table:
return stats
def plot_survey(self, observatory=None, bin_size=30., targets=None, groups=None,
use_groups=False, use_primary_group=True,
show_ungrouped=True, cumulative=False, lst=False,
lunation=None,
show_unused=True, skip_fast=False, show_mpld3=False):
"""Plot the hours spent on target.
Parameters
----------
observatory : str
The observatory to plot. If `None`, all observatories.
bin_size : int
The number of days in each bin of the plot.
targets : list
A list with the names of the targets to plot. If empty, plots all
targets.
groups : list
A list with the names of the groups to plot. If empty, plots all
groups.
use_groups : bool
If set, the targets are grouped together using the
``Target.groups`` list.
use_primary_group : bool
If `True`, a target will only be added to its primary group (the
first one in the group list). Only used when ``use_groups=True``.
show_ungrouped : bool
If `True`, targets that don't belong to any group are plotted
individually. Only used when ``use_groups=True``.
cumulative : bool or str
If `True`, plots the cumulative sum of hours spent on each target.
If ``'group'``, it plots the cumulative on-target hours normalised
by the total hours needed to observe the target group. If ``'survey'``,
plots the cumulative hours normalised by the total survey hours.
When ``cumulative`` is not `False`, ``bin_size`` is set to 1.
lst : bool
Whether to bin the used time by LST instead of JD.
show_unused : bool
Display the unused time.
lunation : list
Range of lunations to include in statistics. Defaults to all lunations.
Set to ``[lmin, lmax]`` to return values of ``lmin < lunation <= lmax``.
Can be used to restrict lst usage plots to only bright, grey, or
dark time.
skip_fast : bool
If set, do not plot targets that complete in the first 20% of the
survey.
Return
------
fig : `~matplotlib.figure.Figure`
The Matplotlib figure of the plot.
"""
        assert self.schedule is not None, 'you have not run a simulation yet.'
if not targets:
targets = [target.name for target in self.targets]
ncols = 2 if len(targets) > 15 else 1
if lst:
bin_size = 1. if bin_size == 30. else bin_size
assert cumulative is False, 'cumulative cannot be used with lst=True.'
        if cumulative is not False:
bin_size = 1
fig, ax = plt.subplots(figsize=(12, 8))
# Leaves a margin on the right to put the legend
fig.subplots_adjust(right=0.65 if ncols == 2 else 0.8)
ax.set_prop_cycle(color=['r', 'g', 'b', 'c', 'm', 'y', 'g', 'b', 'c', 'm', 'y', 'r', 'b',
'c', 'm', 'y', 'r', 'g', 'c', 'm', 'y', 'r', 'g', 'b', ],
linestyle=['-', '--', '-.', ':', '-', '--', '-.', ':', '-', '--', '-.',
':', '-', '--', '-.', ':', '-', '--', '-.', ':', '-', '--',
'-.', ':'])
min_b = (numpy.min(self.schedule['JD']) - 2451545.0) if not lst else 0.0
max_b = (numpy.max(self.schedule['JD']) - 2451545.0) if not lst else 24.0
b = numpy.arange(min_b, max_b + bin_size, bin_size)
# Creates a list of groups to plot. If use_groups=False,
# this is just the list of targets.
if not use_groups:
groups = [target.name for target in self.targets]
else:
groups = groups or self.targets.get_groups()
# Adds the ungrouped targets.
if show_ungrouped:
for target in self.targets:
if len(target.groups) == 0:
groups.append(target.name)
for group in groups:
# Cumulated group heights
            group_heights = numpy.zeros(len(b) - 1, dtype=float)
group_target_tot_time = 0.0
# If we are not using groups or the "group"
# name is that of an ungrouped target.
if not use_groups or group in self.targets._names:
targets = [group]
else:
targets = self.targets.get_group_targets(group, primary=use_primary_group)
for tname in targets:
t = self.targets.get_target(tname)
tindex = [target.name for target in self.targets].index(tname)
# plot each target
tt = self.get_target_time(tname, observatory=observatory, lunation=lunation, return_lst=lst)
if len(tt) == 0:
continue
if not lst:
tt -= 2451545.0
heights, bins = numpy.histogram(tt, bins=b)
heights = numpy.array(heights, dtype=float)
heights *= t.exptime * t.min_exposures / 3600.0
ntiles = len(numpy.where(self.tiledb.tile_table['TargetIndex'].data == tindex)[0])
target_tot_time = ntiles * t.exptime * t.n_exposures / 3600.
if skip_fast:
completion = heights.cumsum() / target_tot_time
if numpy.quantile(completion, 0.2) >= 1:
continue
group_heights += heights
group_target_tot_time += target_tot_time
# Only plot the heights if they are not zero. This prevents
# targets that are not observed at an observatory to be displayed.
if numpy.sum(group_heights) > 0:
if cumulative is False:
ax.plot(bins[:-1] + numpy.diff(bins) / 2, group_heights, label=group)
else:
                    # (sketch) cumulative curve; normalising by the group total
                    # for cumulative == 'group' is an assumption here
                    norm = group_target_tot_time if cumulative == 'group' else 1.0
                    ax.plot(bins[:-1] + numpy.diff(bins) / 2,
                            group_heights.cumsum() / norm, label=group)
        return fig
import numpy as np
import json
network_snrs = []
ln_bfs = []
for i in range(2000):
try:
with open(f'injections/{i}_memory/IMR_mem_inj_non_mem_rec_result.json') as f:
res = json.load(f)
    except Exception as e:
        print(e)
        continue
with open(f'injections/{i}_memory/pp_result.json') as f:
pp_res = json.load(f)
snrs_squared = [res['meta_data']['likelihood']['interferometers'][ifo]['optimal_SNR']**2 for ifo in ['H1', 'L1', 'V1']]
network_snrs.append(np.sqrt(np.sum(snrs_squared)))
ln_bfs.append(pp_res['memory_log_bf'])
print(i)
print()
ln_bfs = np.array(ln_bfs)
network_snrs = np.array(network_snrs)
low_snr_indices = np.where(network_snrs < 15)
high_snr_indices = np.where(network_snrs > 15)
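# Summary sketch (assumption): compare the memory evidence between the two regimes.
print('mean ln BF (SNR < 15):', np.mean(ln_bfs[low_snr_indices]))
print('mean ln BF (SNR > 15):', np.mean(ln_bfs[high_snr_indices]))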
import cftime
import numpy as np
import pandas as pd
import xarray as xr
from xclim.indices import generic
class TestSelectResampleOp:
def test_month(self, q_series):
q = q_series(np.arange(1000))
o = generic.select_resample_op(q, "count", freq="YS", month=3)
np.testing.assert_array_equal(o, 31)
def test_season_default(self, q_series):
# Will use freq='YS', so count J, F and D of each year.
q = q_series(np.arange(1000))
o = generic.select_resample_op(q, "min", season="DJF")
assert o[0] == 0
assert o[1] == 366
def test_season(self, q_series):
q = q_series(np.arange(1000))
o = generic.select_resample_op(q, "count", freq="AS-DEC", season="DJF")
assert o[0] == 31 + 29
class TestThresholdCount:
def test_simple(self, tas_series):
ts = tas_series(np.arange(365))
out = generic.threshold_count(ts, "<", 50, "Y")
np.testing.assert_array_equal(out, [50, 0])
class TestDomainCount:
def test_simple(self, tas_series):
ts = tas_series(np.arange(365))
out = generic.domain_count(ts, low=10, high=20, freq="Y")
np.testing.assert_array_equal(out, [10, 0])
class TestDailyDownsampler:
def test_std_calendar(self):
# standard calendar
# generate test DataArray
time_std = pd.date_range("2000-01-01", "2000-12-31", freq="D")
        da_std = xr.DataArray(np.arange(time_std.size), coords=[time_std], dims="time")  # (assumed construction: daily values on the standard-calendar axis)
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from copy import deepcopy
from abc import abstractmethod
import numpy as np
from maggy.optimizer.abstractoptimizer import AbstractOptimizer
class BaseAsyncBO(AbstractOptimizer):
"""Base class for asynchronous bayesian optimization
Do not initialize this class! only it's subclasses
**async bo**
    Bayesian Optimization consists of a surrogate model to approximate the black-box function and an acquisition
function to select the next hyperparameter configuration. In an asynchronous setting, we need to encourage diversity
when maximizing the acquisition function to prevent redundant sampling.
**optimization direction**
all optimizations are converted to minimizations problems via the `get_metrics_dict()` and `get_metrics_array()`
methods, where the metrics are negated in case of a maximization.
In the `final_store` the original metric values are saved.
**pruner**
Pruners can be run as a subroutine of an optimizer. It's interface `pruner.pruning_routine()` is called at the beginning
of the `get_suggestion()` method and returns the hparam config and budget for the trial. The `trial_id` of the
newly created Trial object is reported back to the pruner via `pruner.report_trial()`.
**budget**
If a pruner was specified ( and hence a multi-fidelity optimization is conducted ), the budget is added as an
additional hyperparameter to the hparam dict so it is passed to the map_fun. Note, that when fitting the
surrogate model the "budget" key is obviously omitted.
**models**
The surrogate models for different budgets are saved in the `models` dict with the budgets as key. In case of
a single fidelity optimization without a pruner or pruner with interim_results, The only model has the key `0`.
If we fit multiple models, sample new hparam configs always from the largest model available (i.e. biggest budget)
**imputing busy trials** (poss. shift to simple.py)
the imputed metrics are calculated on the fly in `get_metrics_array(include_busy_locations=True, budget=`budget`)`
for currently evaluating trials that were sampled from model with `budget`
**interim results**
In literature most surrogate models of BO algorithms are fit with the hyperparameter config and the final metric
of a trial.
    Additionally using interim metrics of trials, i.e. metrics recorded at varying budgets, makes it possible
    to use early stopping in BO. (It also makes it possible to have only one model in multi-fidelity BO instead of one
    model per fidelity.)
    For each interim metric, a hyperparameter config augmented with the budget the metric was generated with is added
    to the training data of the surrogate model
    .. math::
        z_t = [x_t, n_t]; \quad y_t = y_{t,n_t}
    When maximizing the acquisition function to sample the next hyperparameter config to evaluate, always augment with
    the max budget N
    .. math::
        x_t \leftarrow \operatorname{argmax}\, \mathrm{acq}([x, N])
"""
def __init__(
self,
num_warmup_trials=15,
random_fraction=0.33,
interim_results=False,
interim_results_interval=10,
**kwargs
):
"""
Attributes
----------
models (dict): The surrogate models for different budgets are saved in the `models` dict with the budgets as key.
In case of a single fidelity optimization without a pruner. The only model has the key `0`.
        warmup_configs (list[dict]): list of hparam configs used for warming up
sampling_time_start (float): helper variable to calculate time needed for calculating next suggestion, i.e
                                     when sampling from model (`sample_type`=="model"). Calculating the time happens
in `create_trial()`
normalize_categorical (bool): If True, the encoded categorical hparam is also max-min
normalized between 0 and 1 in searchspace.transform()
:param num_warmup_trials: number of random trials at the beginning of experiment
:type num_warmup_trials: int
:param random_fraction: fraction of random samples, between [0,1]
:type random_fraction: float
:param interim_results: If True, use interim metrics from trials for fitting surrogate model. Else use final
metrics only
:type interim_results: bool
:param interim_results_interval: Specifies which interim metrics are used (if interim_results==True)
e.g. interval=10: the metric of every 10th epoch is used for fitting surrogate
:type interim_results_interval: int
"""
super().__init__(**kwargs)
# configure warmup routine
self.num_warmup_trials = num_warmup_trials
self.warmup_sampling = "random"
        self.warmup_configs = []  # keeps track of the warmup configs
allowed_warmup_sampling_methods = ["random"]
if self.warmup_sampling not in allowed_warmup_sampling_methods:
raise ValueError(
"expected warmup_sampling to be in {}, got {}".format(
allowed_warmup_sampling_methods, self.warmup_sampling
)
)
        # surrogate model related arguments
self.models = {} # fitted model of the estimator
self.random_fraction = random_fraction
self.interim_results = interim_results
self.interim_results_interval = interim_results_interval
# helper variable to calculate time needed for calculating next suggestion
self.sampling_time_start = 0.0
# If True, the encoded categorical hparam is also max-min normalized between 0 and 1 in searchspace.transform()
self.normalize_categorical = True
if self.name() == "TPE":
self.normalize_categorical = False
def initialize(self):
# validate hparam types
# at least one hparam needs to be continuous & no DISCRETE hparams
cont = False
for hparam in self.searchspace.items():
if hparam["type"] == self.searchspace.DISCRETE:
raise ValueError(
"This version of Bayesian Optimization does not support DISCRETE Hyperparameters yet, please encode {} as INTEGER".format(
hparam["name"]
)
)
if hparam["type"] in [self.searchspace.DOUBLE, self.searchspace.INTEGER]:
cont = True
if not cont:
raise ValueError(
"In this version of Bayesian Optimization at least one hparam has to be continuous (DOUBLE or INTEGER)"
)
self.warmup_routine()
self.init_model()
def get_suggestion(self, trial=None):
self._log("### start get_suggestion ###")
self.sampling_time_start = time.time()
if trial:
self._log(
"last finished trial: {} with params {}".format(
trial.trial_id, trial.params
)
)
else:
self._log("no previous finished trial")
# check if experiment has finished
if self._experiment_finished():
return None
# pruning routine
if self.pruner:
next_trial_info = self.pruner.pruning_routine()
if next_trial_info == "IDLE":
self._log(
"Worker is IDLE and has to wait until a new trial can be scheduled"
)
return "IDLE"
elif next_trial_info is None:
# experiment is finished
self._log("Experiment has finished")
return None
elif next_trial_info["trial_id"]:
# copy hparams of given promoted trial and start new trial with it
parent_trial_id = next_trial_info["trial_id"]
parent_trial_hparams = deepcopy(
self.get_hparams_dict(trial_ids=parent_trial_id)[parent_trial_id]
)
# update trial info dict and create new trial object
next_trial = self.create_trial(
hparams=parent_trial_hparams,
sample_type="promoted",
run_budget=next_trial_info["budget"],
)
# report new trial id to pruner
self.pruner.report_trial(
original_trial_id=parent_trial_id, new_trial_id=next_trial.trial_id,
)
self._log(
"use hparams from promoted trial {}. \n start trial {}: {} \n".format(
parent_trial_id, next_trial.trial_id, next_trial.params
)
)
return next_trial
else:
# start sampling procedure with given budget
run_budget = next_trial_info["budget"]
if self.interim_results:
model_budget = 0
else:
model_budget = run_budget
else:
run_budget = 0
model_budget = 0
# check if there are still trials in the warmup buffer
if self.warmup_configs:
self._log("take sample from warmup buffer")
next_trial_params = self.warmup_configs.pop()
next_trial = self.create_trial(
hparams=next_trial_params, sample_type="random", run_budget=run_budget,
)
elif np.random.rand() < self.random_fraction:
# random fraction applies, sample randomly
hparams = self.searchspace.get_random_parameter_values(1)[0]
next_trial = self.create_trial(
hparams=hparams, sample_type="random", run_budget=run_budget
)
self._log("sampled randomly: {}".format(hparams))
else:
# update model
if self.pruner and not self.interim_results:
# skip model building if we already have a bigger model
if max(list(self.models.keys()) + [-np.inf]) <= model_budget:
self.update_model(model_budget)
else:
self.update_model(model_budget)
if not self.models:
# in case there is no model yet, sample randomly
hparams = self.searchspace.get_random_parameter_values(1)[0]
next_trial = self.create_trial(
hparams=hparams, sample_type="random", run_budget=run_budget
)
self._log("sampled randomly: {}".format(hparams))
else:
if self.pruner and not self.interim_results:
# sample from largest model available
model_budget = max(self.models.keys())
# sample from model with model budget
self._log(
"start sampling routine from model with budget {}".format(
model_budget
)
)
hparams = self.sampling_routine(model_budget)
next_trial = self.create_trial(
hparams=hparams,
sample_type="model",
run_budget=run_budget,
model_budget=model_budget,
)
self._log(
"sampled from model with budget {}: {}".format(
model_budget, hparams
)
)
# check if Trial with same hparams has already been created
i = 0
while self.hparams_exist(trial=next_trial):
self._log("sample randomly to encourage exploration")
hparams = self.searchspace.get_random_parameter_values(1)[0]
next_trial = self.create_trial(
hparams=hparams, sample_type="random_forced", run_budget=run_budget
)
i += 1
if i > 3:
self._log(
"not possible to sample new config. Stop Experiment (most/all configs have already been used)"
)
return None
# report new trial id to pruner
if self.pruner:
self.pruner.report_trial(
original_trial_id=None, new_trial_id=next_trial.trial_id
)
self._log(
"start trial {}: {}, {} \n".format(
next_trial.trial_id, next_trial.params, next_trial.info_dict
)
)
return next_trial
def finalize_experiment(self, trials):
return
@abstractmethod
def init_model(self):
"""initializes the surrogate model of the gaussian process
the model gets created with the right parameters, but is not fit with any data yet. the `base_model` will be
cloned in `update_model` and fit with observation data
"""
raise NotImplementedError
@abstractmethod
def update_model(self, budget=0):
"""update surrogate model with new observations
Use observations of finished trials + liars from busy trials to build model.
Only build model when there are at least as many observations as hyperparameters
:param budget: the budget for which model should be updated. Default is 0
If budget > 0 : multifidelity optimization. Only use observations that were run with
                            `budget` for updating the model for that `budget`. One model exists per budget.
If == 0: single fidelity optimization. Only one model exists that is fitted with all observations
:type budget: int
"""
raise NotImplementedError
@abstractmethod
def sampling_routine(self, budget=0):
"""Samples new config from model
This methods holds logic for:
- maximizing acquisition function based on current model and observations
- async logic: i.e. imputing busy_locations with a liar to encourage diversity in sampling
:param budget: the budget from which model should be sampled. Default is 0
If budget > 0 : multifidelity optimization. Only use observations that were run with
                            `budget` for updating the model for that `budget`. One model exists per budget.
If == 0: single fidelity optimization. Only one model exists that is fitted with all observations
:type budget: int
:return: hyperparameter config that minimizes the acquisition function
:rtype: dict
"""
raise NotImplementedError
def warmup_routine(self):
"""implements logic for warming up bayesian optimization through random sampling by adding hparam configs to
`warmup_config` list
"""
# generate warmup hparam configs
if self.warmup_sampling == "random":
self.warmup_configs = self.searchspace.get_random_parameter_values(
self.num_warmup_trials
)
else:
raise NotImplementedError(
"warmup sampling {} doesnt exist, use random".format(
self.warmup_sampling
)
)
def _experiment_finished(self):
"""checks if experiment is finished
In normal BO, experiment has finished when specified amount of trials have run,
in BOHB/ASHA when all iterations have been finished
:return: True if experiment has finished, False else
:rtype: bool
"""
if self.pruner:
if self.pruner.finished():
return True
elif len(self.final_store) >= self.num_trials:
self._log(
"Finished experiment, ran {}/{} trials".format(
len(self.final_store), self.num_trials
)
)
return True
else:
return False
def get_busy_locations(self, budget=0):
"""returns hparams of currently evaluating trials
Considers only trials that were sampled from model with specified budget
        This is a helper function used when the async strategy is `impute`
"""
if not self.include_busy_locations():
raise ValueError(
"Only Optimizer GP with async_strategy == `impute` can include busy locations. Got Optimizer {} {}".format(
self.name(),
"async_strategy: " + self.async_strategy
if self.name() == "GP"
else "",
)
)
hparams_busy = np.array(
[
self.searchspace.dict_to_list(trial.params)
for trial_id, trial in self.trial_store.items()
if trial.info_dict["sample_type"] == "model"
and trial.info_dict["model_budget"] == budget
]
)
return hparams_busy
def get_imputed_metrics(self, budget=0):
"""returns imputed metrics for currently evaluating trials
Considers only trials that were sampled from model with specified budget
This is a helper function, only used when async strategy is `impute`
"""
if not self.include_busy_locations():
raise ValueError(
"Only Optimizer GP with async_strategy == `impute` can include busy locations. Got Optimizer {} {}".format(
self.name(),
"async_strategy: " + self.async_strategy
if self.name() == "GP"
else "",
)
)
metrics_busy = np.empty(0, dtype=float)
for trial_id, trial in self.trial_store.items():
if (
trial.info_dict["sample_type"] == "model"
and trial.info_dict["model_budget"] == budget
):
imputed_metric = self.impute_metric(trial.params, budget)
metrics_busy = np.append(metrics_busy, imputed_metric)
# add info about imputed metric to trial info dict
if "imputed_metrics" in trial.info_dict.keys():
trial.info_dict["imputed_metrics"].append(imputed_metric)
else:
trial.info_dict["imputed_metrics"] = [imputed_metric]
return metrics_busy
def get_XY(self, budget=0, interim_results=False, interim_results_interval=10):
"""get transformed hparams and metrics for fitting surrogate
:param budget: budget for which model should be build. Default is 0
If budget > 0 : One model exists per budget. Only use observations that were run with
`budget` for updateing model for that `budget`.
If == 0: Only one model exists that is fitted with all observations
:type budget: int
        :param interim_results: If True, interim results from the metric history are used and hparams are augmented with the budget,
                                i.e. for every interim result there is one observation with
        .. math::
            z_t = [x_t, n_t]; \quad y_t = y_{t,n_t}
Else use final metrics only
:type interim_results: bool
        :param interim_results_interval: Specifies the interval of the interim results being used (if interim_results==True),
                                         e.g. if 10, the metric of every 10th epoch is used for fitting the surrogate
:type interim_results_interval: int
        :return: Tuple of 2 arrays, the first containing the hparams and the second the metrics.
There are four scenarios:
* Hparams and Metrics of finalized trials
shapes: (n_finalized_trials, n_hparams), (n_finalized_trials,)
* Hparams and Metrics of finalized trials
              + hparams and imputed metrics of busy_locations (if async_strategy=="impute")
shapes: (n_finalized_trials+n_busy_locations, n_hparams), (n_finalized_trials+n_busy_locations,)
            * Hparams (augmented with budget) and interim metrics of finalized trials
shapes: (n_interim_results, n_hparams + 1), (n_interim_results,)
Note that first and final metric of each trial are always used
and that trials may be trained with different budgets
            * Hparams (augmented with budget) and interim metrics of finalized trials
              + hparams and imputed final metric for evaluating trials (if async_strategy=="impute")
shapes: (n_interim_results+n_busy_locations, n_hparams + 1),
(n_interim_results+n_busy_locations,)
:rtype: (np.ndarray, np.ndarray)
"""
if not interim_results:
# return final metrics only
# get hparams and final metrics of finalized trials
hparams = self.get_hparams_array(budget=budget)
metrics = self.get_metrics_array(budget=budget, interim_metrics=False)
# if async strategy is `impute`
if self.include_busy_locations():
# get hparams and imputed metrics of evaluating trials
hparams_busy = self.get_busy_locations(budget=budget)
imputed_metrics = self.get_imputed_metrics(budget=budget)
assert hparams_busy.shape[0] == imputed_metrics.shape[0], (
"Number of evaluating trials and imputed "
"metrics needs to be equal, "
"got n_busy_locations: {}, "
"n_imputed_metrics: {}".format(
hparams_busy.shape[0], imputed_metrics.shape[0]
)
)
# append to hparams and metrics
if len(hparams_busy) > 0:
hparams = np.concatenate((hparams, hparams_busy))
metrics = np.concatenate((metrics, imputed_metrics))
# transform hparams
            # note that through transform, the budget param gets omitted from hparams if it was present (pruner)
hparams_transform = np.apply_along_axis(
self.searchspace.transform,
1,
hparams,
normalize_categorical=self.normalize_categorical,
)
X = hparams_transform
y = metrics
assert X.shape[1] == len(
self.searchspace.keys()
), "shape[1] of X needs to be equal to number of hparams"
elif interim_results:
# so far only supported for GP and no pruner
# return interim results and hparams augumented with budget
# return every nth interim result according to interim_results_interval. always return first and last result
# get and transform hparams of all finalized trials
hparams = self.get_hparams_array(budget=budget)
hparams_transform = np.apply_along_axis(
self.searchspace.transform,
1,
hparams,
normalize_categorical=self.normalize_categorical,
)
# get full metric history for all finalized trials
metrics = self.get_metrics_array(
interim_metrics=True, budget=budget
) # array of metric history arrays
# get indices of hparams/metrics to be used for each trial
interim_result_indices = np.array(
[
self.get_interim_result_idx(
metric_history, interim_results_interval
)
for metric_history in metrics
]
            )  # array of lists; each list holds the indices of the metrics to be used for that trial
# only use every nth interim result of metric history. specified with interim_result_indices
metrics_filtered = np.array(
[
metrics[trial_idx][interim_result_indices[trial_idx]]
for trial_idx in range(metrics.shape[0])
]
)
# flatten results so they can be used for fitting posterior
            # if the arrays in metrics_filtered do not have the same length, e.g. some trials were early stopped or the pruner was activated,
            # metrics_filtered is ragged (metrics_filtered.shape = (n_trials,)) and flatten() does not work, so use np.hstack().
            # if all trials were trained on the max budget (shape = (n_trials, n_filtered_results)) use flatten(), which is faster.
if len(metrics_filtered.shape) == 2:
metrics_flat = metrics_filtered.flatten()
else:
metrics_flat = np.hstack(metrics_filtered)
            # augment hparams with budget, i.e. z_t = [x_t, n_t] for every interim result
max_budget = self.get_max_budget()
n_hparams = len(self.searchspace.keys())
hparams_augumented = np.empty((0, n_hparams + 1))
            # create one hparam config augmented with the corresponding normalized budget for every interim result
# loop through trials
for indices, trial_hparams in zip(
interim_result_indices, hparams_transform
):
# loop through interim results
for idx in indices:
# idx is budget
normalized_budget = self.searchspace._normalize_integer(
[0, max_budget - 1], idx
)
augumented_trial_hparams = np.append(
deepcopy(trial_hparams), normalized_budget
)
hparams_augumented = np.vstack(
(hparams_augumented, augumented_trial_hparams)
)
            # add evaluating trials if impute strategy, i.e. z = [x, max_budget], y = imputed_metric
if self.include_busy_locations():
# get params and imputed metrics of evaluating trials
hparams_busy = self.get_busy_locations(budget=budget)
imputed_metrics = self.get_imputed_metrics(budget=budget)
assert (
hparams_busy.shape[0] == imputed_metrics.shape[0]
), "Number of evaluating trials and imputed metrics needs to be equal, got n_busy_locations: {}, n_imputed_metrics: {}".format(
hparams_busy.shape[0], imputed_metrics.shape[0]
)
if len(hparams_busy) > 0:
# transform hparams
hp_trans = np.apply_along_axis(
self.searchspace.transform,
1,
hparams_busy,
normalize_categorical=self.normalize_categorical,
)
                # augment with max budget (i.e. always 1 in normalized form)
hp_aug = np.append(
hp_trans, np.ones(hp_trans.shape[0]).reshape(-1, 1), 1
)
# append to hparams and metrics
hparams_augumented = np.concatenate((hparams_augumented, hp_aug))
                    metrics_flat = np.concatenate((metrics_flat, imputed_metrics))
            # (sketch) expose the augmented hparams and flattened metrics as the
            # training set; assumed to mirror the X/y assignment of the branch above
            X = hparams_augumented
            y = metrics_flat
        return X, y
import numpy as np
import numpy.linalg as la
def barrier_move_not_acceptable_for_qp(P, q, x, residual, f_val, v, f_prime, d_residual, s, t, alpha):
moved = x + s * v
moved_val = t * (.5 * np.matmul(np.matmul(moved.T, P), moved) + np.matmul(q.T, moved)) \
- np.sum(np.log(residual + s * d_residual))
return moved_val >= f_val + alpha * s * f_prime
def log_barrier_for_qp(P, q, A, b, alpha, beta, mu, iterations, tol=1e-3, eps=1e-6):
t = 1
gap = np.inf
m = A.shape[0]
n = A.shape[1]
x = np.zeros((n, 1))
gap_results = []
for iteration in range(iterations):
residual = (b - np.matmul(A, x)).astype('f')
f_val = t * (.5 * np.matmul(x.T, np.matmul(P, x)) + np.matmul(q.T, x)) - np.sum(np.log(residual))
f_grad = t * (np.matmul(P, x) + q) + np.matmul(A.T, np.reciprocal(residual))
f_hess = t * P + np.matmul(A.T, np.matmul(np.diag(np.reciprocal(np.square(residual))[:, 0]), A))
v = -1 * la.lstsq(f_hess, f_grad, rcond=None)[0]
f_prime = np.matmul(f_grad.T, v)
d_residual = -np.matmul(A, v)
s = 1
while np.min(residual + s * d_residual) <= 0:
s *= beta
while barrier_move_not_acceptable_for_qp(P, q, x, residual, f_val, v, f_prime, d_residual, s, t, alpha):
s *= beta
if s == 0:
break
x += s * v
if -f_prime < eps:
gap = m / t
if gap < tol:
print('Iteration: %d - final gap: %f' % (iteration, gap))
gap_results.append(gap)
break
t = mu * t
gap_results.append(gap)
return gap_results
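# Minimal usage sketch (hedged, toy data): minimize .5*x'Px + q'x s.t. Ax <= b.
#   P = np.eye(2); q = np.ones((2, 1))
#   A = np.vstack([np.eye(2), -np.eye(2)]); b = np.ones((4, 1))
#   gaps = log_barrier_for_qp(P, q, A, b, alpha=0.1, beta=0.7, mu=10, iterations=50)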
def calculate_new_r(P, q, A, t_inv, step, z, dz, x, dx, s, ds):
newz = z + step * dz
newx = x + step * dx
news = s + step * ds
return np.concatenate((np.matmul(P, newx) + q + np.matmul(A.T, newz), newz * news - t_inv), axis=0)
def interior_point_for_qp(P, q, A, b, alpha, beta, mu, iterations, tol=1e-3, eps=1e-6):
m = A.shape[0]
n = A.shape[1]
x = np.zeros((n, 1))
s = (b - np.matmul(A, x)).astype('f')
z = np.reciprocal(s)
surrogates = []
dual_residual_norms = []
for iteration in range(iterations):
        gap = np.matmul(s.T, z).item()
        res = np.matmul(P, x) + q + np.matmul(A.T, z)  # dual residual, mirroring calculate_new_r
import numpy as np
"""
备注:本示例中所有的matrix都使用的是array来体现
实际上python中存在专门的matrix
# 主要注意matrix和array的区别
# matrix 使用更加接近matlab的使用方式
简单示例如下:
B = np.matrix('1,2;3,4')
print(B*B)
# 注意:若使用matrix,则*直接表示矩阵乘法
# 而元素对应乘积则使用 multiply() 函数
"""
def vector_dot():
a = np.array([1, 3, 5, 7, 9])
b = np.array([1, 2, 3, 4, 5])
    # element-wise multiplication
print(a * b)
    # element-wise square (power of 2)
print(a ** 2)
# dot product
print(a.dot(b))
print(type(a))
print(a.dtype)
print(a.shape)
def array_matrix_sum():
d = np.arange(10)
    # sum: over an array
    print(d.sum())
    # min / max
    print(np.min(d))  # both forms give the same result
    print(d.max())
    # cumulative sum
    print(d.cumsum())
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    # sum: over a matrix
    print(x.sum())
    print(np.sum(x, axis=0))  # column sums
    print(x.sum(axis=1))  # row sums
def array_matrix_slice():
# start,stop,num
d = np.linspace(0, 90, 10)
print(d)
# start,stop,num,endpoint=False
d = np.linspace(0, 100, 10, endpoint=False)
print(d)
    # slicing
indices = [1, 3, -1]
print(d[indices])
    print(d[[1, 3, -1]])  # the outer [] indexes d, the inner [] is the list of indices
print(d[:5])
    # conditional slicing
print(d[d >= 50])
    # find the indices of elements matching a condition
index = np.where(d >= 50)
print(index[0])
print(d[index[0]])
e = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
[13, 14, 15, 16]])
print(e)
    # matrix-style slicing
print(e[1:3, 1:3])
    # select a single element of the matrix
print(e[3, 2])
print(e[(3, 2)])
    # fancy indexing --- array[[x],[y]]
    # the coordinates pair up as (1,1),(1,2),(2,1),(2,2)
    # all the selected points are returned as a 1-D array
print(e[[1, 1, 2, 2], [1, 2, 1, 2]])
#
print(e[np.arange(1, 3), 1:3])
    # returns booleans (element-wise);
    # these booleans can also be used for index slicing
print(e % 2 == 0)
print(e[e % 2 == 0])
def array_stack():
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
b = np.array([0, 1, 0, -1])
c = np.array([0, 1, 0, -1]).reshape(2, 2)
    print('------------- stacking --------------')
    # vertical stacking
    r = np.vstack([a, b])
    print(r)
    # horizontal stacking
    r2 = np.hstack([a, c])
"""Dark matter - electron scattering
"""
import numericalunits as nu
import numpy as np
from scipy.interpolate import RegularGridInterpolator, interp1d
from scipy.integrate import quad, dblquad
from scipy.stats import binom
import wimprates as wr
export, __all__ = wr.exporter()
__all__ += ['dme_shells', 'l_to_letter', 'l_to_number']
# Load form factor and construct interpolators
shell_data = wr.load_pickle('dme/dme_ionization_ff.pkl')
for _shell, _sd in shell_data.items():
_sd['log10ffsquared_itp'] = RegularGridInterpolator(
(_sd['lnks'], _sd['lnqs']),
np.log10(_sd['ffsquared']),
bounds_error=False, fill_value=-float('inf'),)
dme_shells = [(5, 1), (5, 0), (4, 2), (4, 1), (4, 0)]
l_to_number = dict(s=0, p=1, d=2, f=3)
l_to_letter = {v: k for k, v in l_to_number.items()}
@export
def shell_str(n, l):
if isinstance(l, str):
return str(n) + l
return str(n) + l_to_letter[l]
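# e.g. shell_str(5, 1) -> '5p'; a string l passes through: shell_str(4, 'd') -> '4d'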
@export
def dme_ionization_ff(shell, e_er, q):
"""Return dark matter electron scattering ionization form factor
Outside the parametrized range, the form factor is assumed 0
to give conservative results.
:param shell: Name of atomic shell, e.g. '4p'
Note not all shells are included in the data.
:param e_er: Electronic recoil energy
    :param q: Momentum transfer
"""
if isinstance(shell, tuple):
shell = shell_str(*shell)
    lnq = np.log(q / (nu.me * nu.c0 * nu.alphaFS))
"""Module to provide functionality to import structures."""
# pylint: disable=no-self-use
import io
import datetime
from collections import OrderedDict
import numpy as np
import ipywidgets as ipw
from traitlets import Instance, Int, List, Unicode, Union, dlink, link, default, observe
from sklearn.decomposition import PCA
# ASE imports
import ase
from ase import Atom, Atoms
from ase.data import chemical_symbols, covalent_radii
# AiiDA imports
from aiida.engine import calcfunction
from aiida.orm import CalcFunctionNode, CalcJobNode, Data, QueryBuilder, Node, WorkChainNode
from aiida.plugins import DataFactory
# Local imports
from .utils import get_ase_from_file
from .viewers import StructureDataViewer
from .data import LigandSelectorWidget
CifData = DataFactory('cif') # pylint: disable=invalid-name
StructureData = DataFactory('structure') # pylint: disable=invalid-name
SYMBOL_RADIUS = {key: covalent_radii[i] for i, key in enumerate(chemical_symbols)}
class StructureManagerWidget(ipw.VBox):
    '''Upload a structure and store it in the AiiDA database.
Attributes:
structure(Atoms): trait that contains the selected structure. 'None' if no structure is selected.
structure_node(StructureData, CifData): trait that contains AiiDA structure object
node_class(str): trait that contains structure_node type (as string).
'''
input_structure = Union([Instance(Atoms), Instance(Data)], allow_none=True)
structure = Union([Instance(Atoms), Instance(Data)], allow_none=True)
structure_node = Instance(Data, allow_none=True, read_only=True)
node_class = Unicode()
SUPPORTED_DATA_FORMATS = {'CifData': 'cif', 'StructureData': 'structure'}
def __init__(self, importers, viewer=None, editors=None, storable=True, node_class=None, **kwargs):
"""
Arguments:
            importers(list): list of importer objects. Each importer should provide a
                ``title`` attribute (used as the tab name) and a 'structure' trait pointing
                to the imported structure. The trait will be linked to the 'structure' trait
                of this class.
storable(bool): Whether to provide Store button (together with Store format)
node_class(str): AiiDA node class for storing the structure.
Possible values: 'StructureData', 'CifData' or None (let the user decide).
Note: If your workflows require a specific node class, better fix it here.
"""
# History of modifications
self.history = []
# Undo functionality.
btn_undo = ipw.Button(description='Undo', button_style='success')
btn_undo.on_click(self.undo)
self.structure_set_by_undo = False
# To keep track of last inserted structure object
self._inserted_structure = None
# Structure viewer.
if viewer:
self.viewer = viewer
else:
self.viewer = StructureDataViewer(downloadable=False)
dlink((self, 'structure'), (self.viewer, 'structure'))
# Store button.
self.btn_store = ipw.Button(description='Store in AiiDA', disabled=True)
self.btn_store.on_click(self.store_structure)
# Label and description that are stored along with the new structure.
self.structure_label = ipw.Text(description='Label')
self.structure_description = ipw.Text(description='Description')
# Store format selector.
data_format = ipw.RadioButtons(options=self.SUPPORTED_DATA_FORMATS, description='Data type:')
link((data_format, 'label'), (self, 'node_class'))
# Store button, store class selector, description.
store_and_description = [self.btn_store] if storable else []
if node_class is None:
store_and_description.append(data_format)
elif node_class in self.SUPPORTED_DATA_FORMATS:
self.node_class = node_class
else:
raise ValueError("Unknown data format '{}'. Options: {}".format(node_class,
list(self.SUPPORTED_DATA_FORMATS.keys())))
self.output = ipw.HTML('')
children = [
self._structure_importers(importers), self.viewer,
ipw.HBox(store_and_description + [self.structure_label, self.structure_description])
]
structure_editors = self._structure_editors(editors)
if structure_editors:
structure_editors = ipw.VBox([btn_undo, structure_editors])
accordion = ipw.Accordion([structure_editors])
accordion.selected_index = None
accordion.set_title(0, 'Edit Structure')
children += [accordion]
super().__init__(children=children + [self.output], **kwargs)
def _structure_importers(self, importers):
"""Preparing structure importers."""
if not importers:
raise ValueError("The parameter importers should contain a list (or tuple) of "
"importers, got a falsy object.")
# If there is only one importer - no need to make tabs.
if len(importers) == 1:
# Assigning a function which will be called when importer provides a structure.
dlink((importers[0], 'structure'), (self, 'input_structure'))
return importers[0]
# Otherwise making one tab per importer.
importers_tab = ipw.Tab()
importers_tab.children = [i for i in importers] # One importer per tab.
for i, importer in enumerate(importers):
# Labeling tabs.
importers_tab.set_title(i, importer.title)
dlink((importer, 'structure'), (self, 'input_structure'))
return importers_tab
def _structure_editors(self, editors):
"""Preparing structure editors."""
if editors and len(editors) == 1:
link((editors[0], 'structure'), (self, 'structure'))
if editors[0].has_trait('selection'):
link((editors[0], 'selection'), (self.viewer, 'selection'))
if editors[0].has_trait('camera_orientation'):
dlink((self.viewer._viewer, '_camera_orientation'), (editors[0], 'camera_orientation')) # pylint: disable=protected-access
return editors[0]
# If more than one editor was defined.
if editors:
editors_tab = ipw.Tab()
editors_tab.children = tuple(editors)
for i, editor in enumerate(editors):
editors_tab.set_title(i, editor.title)
link((editor, 'structure'), (self, 'structure'))
if editor.has_trait('selection'):
link((editor, 'selection'), (self.viewer, 'selection'))
if editor.has_trait('camera_orientation'):
dlink((self.viewer._viewer, '_camera_orientation'), (editor, 'camera_orientation')) # pylint: disable=protected-access
return editors_tab
return None
def store_structure(self, _=None):
"""Stores the structure in AiiDA database."""
if self.structure_node is None:
return
if self.structure_node.is_stored:
self.output.value = "Already stored in AiiDA [{}], skipping...".format(self.structure_node)
return
self.btn_store.disabled = True
self.structure_node.label = self.structure_label.value
self.structure_label.disabled = True
self.structure_node.description = self.structure_description.value
self.structure_description.disabled = True
if isinstance(self.input_structure, Data) and self.input_structure.is_stored:
# Make a link between self.input_structure and self.structure_node
@calcfunction
def user_modifications(source_structure): # pylint: disable=unused-argument
return self.structure_node
structure_node = user_modifications(self.input_structure)
else:
structure_node = self.structure_node.store()
self.output.value = "Stored in AiiDA [{}]".format(structure_node)
def undo(self, _):
"""Undo modifications."""
self.structure_set_by_undo = True
if self.history:
self.history = self.history[:-1]
if self.history:
self.structure = self.history[-1]
else:
self.input_structure = None
self.structure_set_by_undo = False
@staticmethod
@default('node_class')
def _default_node_class():
return 'StructureData'
@observe('node_class')
def _change_structure_node(self, _=None):
with self.hold_trait_notifications():
self._sync_structure_node()
def _sync_structure_node(self):
"""Synchronize the structure_node trait using the currently provided info."""
if len(self.history) > 1:
# There are some modifications, so converting from ASE.
structure_node = self._convert_to_structure_node(self.structure)
else:
structure_node = self._convert_to_structure_node(self.input_structure)
self.set_trait('structure_node', structure_node)
def _convert_to_structure_node(self, structure):
"""Convert structure of any type to the StructureNode object."""
if structure is None:
return None
StructureNode = DataFactory(self.SUPPORTED_DATA_FORMATS[self.node_class]) # pylint: disable=invalid-name
# If the input_structure trait is set to Atoms object, structure node must be created from it.
if isinstance(structure, Atoms):
return StructureNode(ase=structure)
# If the input_structure trait is set to AiiDA node, check what type
if isinstance(structure, Data):
# Transform the structure to the StructureNode if needed.
if isinstance(structure, StructureNode):
return structure
# Using self.structure, as it was already converted to the ASE Atoms object.
return StructureNode(ase=self.structure)
@observe('structure_node')
def _observe_structure_node(self, change):
"""Modify structure label and description when a new structure is provided."""
struct = change['new']
if struct is None:
self.btn_store.disabled = True
self.structure_label.value = ''
self.structure_label.disabled = True
self.structure_description.value = ''
self.structure_description.disabled = True
return
if struct.is_stored:
self.btn_store.disabled = True
self.structure_label.value = struct.label
self.structure_label.disabled = True
self.structure_description.value = struct.description
self.structure_description.disabled = True
else:
self.btn_store.disabled = False
self.structure_label.value = self.structure.get_chemical_formula()
self.structure_label.disabled = False
self.structure_description.value = ''
self.structure_description.disabled = False
@observe('input_structure')
def _observe_input_structure(self, change):
"""Returns ASE atoms object and sets structure_node trait."""
# If the `input_structure` trait is set to Atoms object, then the `structure` trait should be set to it as well.
self.history = []
if isinstance(change['new'], Atoms):
self.structure = change['new']
# If the `input_structure` trait is set to AiiDA node, then the `structure` trait should
# be converted to an ASE Atoms object.
        elif isinstance(change['new'], CifData):  # Special treatment of the CifData object
str_io = io.StringIO(change['new'].get_content())
self.structure = ase.io.read(str_io, format='cif', reader='ase', store_tags=True)
elif isinstance(change['new'], StructureData):
self.structure = change['new'].get_ase()
else:
self.structure = None
@observe('structure')
def _structure_changed(self, change=None):
"""Perform some operations that depend on the value of `structure` trait.
This function enables/disables `btn_store` widget if structure is provided/set to None.
Also, the function sets `structure_node` trait to the selected node type.
"""
if not self.structure_set_by_undo:
self.history.append(change['new'])
# If structure trait was set to None, structure_node should become None as well.
if self.structure is None:
self.set_trait('structure_node', None)
self.btn_store.disabled = True
return
self.btn_store.disabled = False
with self.hold_trait_notifications():
self._sync_structure_node()
class StructureUploadWidget(ipw.VBox):
"""Class that allows to upload structures from user's computer."""
structure = Union([Instance(Atoms), Instance(Data)], allow_none=True)
def __init__(self, title='', description="Upload Structure"):
self.title = title
self.file_upload = ipw.FileUpload(description=description, multiple=False, layout={'width': 'initial'})
supported_formats = ipw.HTML(
"""<a href="https://wiki.fysik.dtu.dk/ase/_modules/ase/io/formats.html" target="_blank">
Supported structure formats
</a>""")
self.file_upload.observe(self._on_file_upload, names='value')
super().__init__(children=[self.file_upload, supported_formats])
def _validate_and_fix_ase_cell(self, ase_structure, vacuum_ang=10.0):
"""
Checks if the ase Atoms object has a cell set,
otherwise sets it to bounding box plus specified "vacuum" space
"""
cell = ase_structure.cell
if (np.linalg.norm(cell[0]) < 0.1 or np.linalg.norm(cell[1]) < 0.1 or np.linalg.norm(cell[2]) < 0.1):
# if any of the cell vectors is too short, consider it faulty
# set cell as bounding box + vacuum_ang
        bbox = np.ptp(ase_structure.positions, axis=0)
        # Assumed completion (the source is truncated here): set the cell to
        # the bounding box plus the requested vacuum and re-center the atoms.
        ase_structure.cell = bbox + vacuum_ang
        ase_structure.center()
        return ase_structure
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 09:12:56 2015
@author: anderson
"""
# importing modules
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
from . import adjust_spines
def plot_single_hfo(hfo, envelope = False, xlim =[-1,1], cutoff = None, v = True,
axes = None, figure_size = (15,10),dpi=600,saveplot = None):
"""
    Function to plot a single HFO
Parameters
----------
hfo: HFOObj
HFO object to plot
envelope: boolean
True (default) - plot envelope of filtered signal
figure_size: tuple
(5,5) (default) - Size of figure, tuple of integers with width, height in inches
dpi: int
600 - DPI resolution
"""
    if axes is None:
# Creating the figure
fig = plt.figure(figsize=figure_size,dpi=dpi)
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
else:
ax1 = axes[0]
ax2 = axes[1]
ax3 = axes[2]
# number of points
npoints = hfo.waveform.shape[0]
time_v = np.linspace(-1,1,npoints,endpoint=True)
# creating the axes
ax1.plot(time_v,hfo.waveform[:,0],'b')
ax1.plot(time_v[hfo.start_idx:hfo.end_idx],hfo.waveform[hfo.start_idx:hfo.end_idx,0],'k')
adjust_spines(ax1, ['left'])
ax1.set_xlim(xlim)
filt = hfo.waveform[:,1]
ax2.plot(time_v,filt)
ax2.plot(time_v[hfo.start_idx:hfo.end_idx],filt[hfo.start_idx:hfo.end_idx],'k')
if envelope:
env = hfo.waveform[:,2]
ax4 = ax2.twinx()
ax4.plot(time_v,env,'g')
adjust_spines(ax2, ['left', 'bottom'])
ax2.set_xlim(xlim)
hfo.spectrum.plot(cutoff = cutoff, v = v, ax = ax3)
ax3.set_title('peak freq = ' + str(hfo.spectrum.peak_freq))
adjust_spines(ax3, ['left', 'bottom'])
    if saveplot is not None:
        if isinstance(saveplot, str):
            plt.savefig(saveplot, bbox_inches='tight')
        else:
            raise Exception('saveplot should be a string')
plt.draw()
def plot_mean_hfo(evlist,color='blue', xlim =[-1,1], figure_size=(10,10),dpi=600,saveplot = None):
"""
Function to plot cluster of HFOs
Parameters
----------
evlist: EventList
EventList object to plot
color: str
Color of plot
figure_size: tuple
(5,5) (default) - Size of figure, tuple of integers with width, height in inches
dpi: int
600 - DPI resolution
"""
f = plt.figure(figsize=figure_size,dpi=dpi)
    raw = np.array([])  # creating an empty array
    filt = np.array([])  # creating an empty array
    pxx = np.array([])  # creating an empty array
nwave, a = evlist[0].waveform.shape
time_v = np.linspace(-1,1,nwave,endpoint=True)
npw, = evlist[0].spectrum.nPxx.shape
F = evlist[0].spectrum.F
for hfo in evlist:
raw = np.append(raw, hfo.waveform[:,0])
#ax1.plot(time_v,hfo.waveform[:,0],lw=.5)
filt = np.append(filt, hfo.waveform[:,1])
#ax2.plot(time_v,hfo.waveform[:,1],lw=.5)
        pxx = np.append(pxx, hfo.spectrum.nPxx)
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
with tf.Graph().as_default():
test_text = '''
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
        Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills'''
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict),\
'vocab_to_int is not a dictionary.'
assert isinstance(int_to_vocab, dict),\
'int_to_vocab is not a dictionary.'
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab),\
'Length of vocab_to_int and int_to_vocab don\'t match. ' \
'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)
# Make sure the dicts make the same lookup
missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]
assert not missmatches,\
            'Found {} mismatch(es). First mismatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
len(missmatches),
*missmatches[0])
assert len(vocab_to_int) > len(set(test_text))/2,\
'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))
_print_success_message()
def test_get_batches(get_batches):
with tf.Graph().as_default():
test_batch_size = 128
test_seq_length = 5
test_int_text = list(range(1000*test_seq_length))
batches = get_batches(test_int_text, test_batch_size, test_seq_length)
# Check type
assert isinstance(batches, np.ndarray),\
'Batches is not a Numpy array'
# Check shape
assert batches.shape == (7, 2, 128, 5),\
'Batches returned wrong shape. Found {}'.format(batches.shape)
for x in range(batches.shape[2]):
assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\
'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])
assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\
'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])
last_seq_target = (test_batch_size-1) * 35 + 31
last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))
last_seq[-1] = batches[0,0,0,0]
        assert np.array_equal(batches[-1,1,-1], last_seq)
import numpy as np
from .utils import log_nowarn, squared_distance_matrix
from .checks import _check_size, _check_labels
def hgda_train(X, Y, priors=None):
"""Train a heteroscedastic GDA classifier.
Parameters
----------
X : ndarray, shape (m, n)
training features.
Y : ndarray, shape (m,)
training labels with values in {0, ..., k - 1}.
priors : ndarray, shape (k,)
Prior probabilities for the classes (if None they get
estimated from Y).
Returns
-------
means : ndarray, shape (k, n)
class mean vectors.
invcovs : ndarray, shape (k, n, n)
inverse of the class covariance matrices.
priors : ndarray, shape (k,)
class prior probabilities.
"""
_check_size("mn, m, k?", X, Y, priors)
Y = _check_labels(Y)
k = Y.max() + 1
m, n = X.shape
means = np.empty((k, n))
invcovs = np.empty((k, n, n))
if priors is None:
priors = np.bincount(Y) / m
for c in range(k):
means[c, :] = X[Y == c, :].mean(0)
cov = np.cov(X[Y == c, :].T)
invcovs[c, :, :] = np.linalg.inv(cov)
return means, invcovs, priors
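# Usage sketch added for illustration (not part of the original module);
# the toy data below is hypothetical.
def _example_hgda():
    rng = np.random.default_rng(0)
    X = np.vstack([rng.normal(0, 1, (20, 2)), rng.normal(3, 1, (20, 2))])
    Y = np.repeat([0, 1], 20)
    means, invcovs, priors = hgda_train(X, Y)
    labels, _scores = hgda_inference(X, means, invcovs, priors)
    return (labels == Y).mean()  # training accuracy on the toy data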
def hgda_inference(X, means, invcovs, priors):
"""Heteroscedastic GDA inference.
Parameters
----------
X : ndarray, shape (m, n)
input features (one row per feature vector).
means : ndarray, shape (k, n)
class mean vectors.
invcovs : ndarray, shape (k, n, n)
inverse of the class covariance matrices.
priors : ndarray, shape (k,)
class prior probabilities.
Returns
-------
ndarray, shape (m,)
predicted labels (one per feature vector).
ndarray, shape (m, k)
scores assigned to each class.
"""
_check_size("mn, kn, knn, k", X, means, invcovs, priors)
m, n = X.shape
k = means.shape[0]
scores = np.empty((m, k))
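    # Each score is the negative log of (Gaussian density x prior) up to an
    # additive constant: 0.5 (x-mu_c)' S_c^-1 (x-mu_c) - 0.5 log|S_c^-1| - log p_c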
for c in range(k):
det = np.linalg.det(invcovs[c, :, :])
diff = X - means[c, :]
q = ((diff @ invcovs[c, :, :]) * diff).sum(1)
scores[:, c] = 0.5 * q - 0.5 * np.log(det) - log_nowarn(priors[c])
labels = np.argmin(scores, 1)
return labels, -scores
def ogda_train(X, Y, priors=None):
"""Train a omoscedastic GDA classifier.
Parameters
----------
X : ndarray, shape (m, n)
training features.
Y : ndarray, shape (m,)
training labels with values in {0, ..., k - 1}.
priors : ndarray, shape (k,)
Prior probabilities for the classes (if None they get
estimated from Y).
Returns
-------
W : ndarray, shape (n, k)
weight vectors, each row representing a different class.
b : ndarray, shape (k,)
vector of biases.
"""
_check_size("mn, m, k?", X, Y, priors)
Y = _check_labels(Y)
k = Y.max() + 1
m, n = X.shape
means = np.empty((k, n))
cov = np.zeros((n, n))
if priors is None:
        priors = np.bincount(Y) / m  # normalized as in hgda_train above
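    # Assumed completion (the source is truncated here): pool a shared
    # covariance across classes, then form the linear discriminant (W, b)
    # described in the docstring. A sketch, not the original code.
    for c in range(k):
        means[c, :] = X[Y == c, :].mean(0)
        cov += priors[c] * np.cov(X[Y == c, :].T)
    invcov = np.linalg.inv(cov)
    W = invcov @ means.T
    b = -0.5 * np.sum(means @ invcov * means, 1) + log_nowarn(priors)
    return W, b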
import numpy as np
from numpy.linalg import inv
# Kalman Filter Class
class KalmanFilter:
"""
Simple Kalman filter
"""
def __init__(self, XY, B=np.array([0]), M=np.array([0])):
stateMatrix = np.zeros((4, 1), np.float32) # [x, y, delta_x, delta_y]
if XY != 0:
stateMatrix = np.array([[XY[0]], [XY[1]], [0], [0]], np.float32)
# np.array([XY[0], XY[1],0,0],np.float32)
estimateCovariance = np.eye(stateMatrix.shape[0])
transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.001
measurementStateMatrix = np.zeros((2, 1), np.float32)
observationMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 13
X=stateMatrix
P=estimateCovariance
F=transitionMatrix
Q=processNoiseCov
Z=measurementStateMatrix
H=observationMatrix
R=measurementNoiseCov
"""
Initialise the filter
Args:
X: State estimate
P: Estimate covariance
F: State transition model
B: Control matrix
M: Control vector
Q: Process noise covariance
Z: Measurement of the state X
H: Observation model
R: Observation noise covariance
"""
self.X = X
self.P = P
self.F = F
self.B = B
self.M = M
self.Q = Q
self.Z = Z
self.H = H
self.R = R
def predict(self):
"""
Predict the future state
Args:
self.X: State estimate
self.P: Estimate covariance
self.B: Control matrix
self.M: Control vector
Returns:
updated self.X
"""
# Project the state ahead
self.X = self.F @ self.X + self.B @ self.M
self.P = self.F @ self.P @ self.F.T + self.Q
return self.X
def correct(self, Z):
"""
Update the Kalman Filter from a measurement
Args:
self.X: State estimate
self.P: Estimate covariance
Z: State measurement
Returns:
updated X
"""
K = self.P @ self.H.T @ inv(self.H @ self.P @ self.H.T + self.R)
self.X += K @ (Z - self.H @ self.X)
self.P = self.P - K @ self.H @ self.P
return self.X
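# Usage sketch added for illustration (not part of the original module):
# alternate predict/correct over a synthetic measurement stream.
def _example_track():
    kf = KalmanFilter((0.0, 0.0))
    state = None
    for t in range(5):
        kf.predict()
        z = np.array([[float(t)], [2.0 * t]], np.float32)  # synthetic (x, y)
        state = kf.correct(z)
    return state  # [x, y, delta_x, delta_y] estimate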
"""
needed variables to instantly initialize kalman by just passing X,Y variables (x,y)
"""
def init_kalman(XY):
kalman = None
stateMatrix = np.zeros((4, 1), np.float32) # [x, y, delta_x, delta_y]
if XY != 0:
stateMatrix = np.array([[XY[0]], [XY[1]],[0],[0]],np.float32)
# np.array([XY[0], XY[1],0,0],np.float32)
estimateCovariance = np.eye(stateMatrix.shape[0])
transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.001
measurementStateMatrix = np.zeros((2, 1), np.float32)
observationMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
    measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 13  # scale restored by analogy with the class above
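    # Assumed completion (the source is truncated here): hand off to the
    # class above, which rebuilds these matrices internally from XY.
    kalman = KalmanFilter(XY)
    return kalman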
# Copyright (c) 2016 by <NAME> and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
"""
.. module:: interp
"""
from __future__ import print_function
import numpy as np
import scipy.linalg
import galsim
import warnings
from .interp import Interp
from .star import Star, StarFit
class BasisInterp(Interp):
r"""An Interp class that works whenever the interpolating functions are
linear sums of basis functions. Does things the "slow way" to be stable to
degenerate fits to individual stars, instead of fitting to parameter sets
produced by single stars.
First time coding this we will assume that each element of the PSF parameter
vector p is a linear combination of the same set of basis functions across the
focal plane,
.. math::
p_i = \sum_{j} q_{ij} K_j(u,v,other stellar params).
The property degenerate_points is set to True to indicate that this interpolator
uses the alpha/beta quadratic form of chisq for each sample, rather than assuming
that a best-fit parameter vector is available at every sample.
Internally we'll store the interpolation coefficients in a 2d array of dimensions
(nparams, nbases)
Note: This is an abstract base class. The concrete class you probably want to use
is BasisPolynomial.
"""
def __init__(self):
self.degenerate_points = True # This Interpolator uses chisq quadratic forms
self.use_qr = False # The default. May be overridden by subclasses.
self.q = None
def initialize(self, stars, logger=None):
"""Initialize both the interpolator to some state prefatory to any solve iterations and
initialize the stars for use with this interpolator.
This class will initialize everything to have constant PSF parameter vector taken
from the first Star in the list.
:param stars: A list of Star instances to use to initialize.
:param logger: A logger object for logging debug info. [default: None]
:returns: A new list of Stars which have their parameters initialized.
"""
c = np.mean([s.fit.params for s in stars], axis=0)
self.q = c[:,np.newaxis] * self.constant(1.)[np.newaxis,:]
stars = self.interpolateList(stars)
return stars
def basis(self, star):
"""Return 1d array of polynomial basis values for this star
:param star: A Star instance
:returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
"""
raise NotImplementedError("Cannot call `basis` for abstract base class BasisInterp. "
"You probably want to use BasisPolynomial.")
def constant(self, value=1.):
"""Return 1d array of coefficients that represent a polynomial with constant value.
:param value: The value to use as the constant term. [default: 1.]
:returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
"""
raise NotImplementedError("Cannot call `constant` for abstract base class BasisInterp. "
"You probably want to use BasisPolynomial.")
def solve(self, stars, logger=None):
"""Solve for the interpolation coefficients given some data.
The StarFit element of each Star in the list is assumed to hold valid
alpha and beta members specifying depending of chisq on differential
changes to its parameter vector.
:param stars: A list of Star instances to interpolate between
:param logger: A logger object for logging debug info. [default: None]
"""
logger = galsim.config.LoggerWrapper(logger)
if self.q is None:
raise RuntimeError("Attempt to solve() before initialize() of BasisInterp")
# The inputs to this function are for each star i the design equation is
#
# A_i p = b_i
#
# The parameters at each stars location are modeled as
#
# p_i = q . K_i(u_i,v_i,...)
#
# where K is the basis vector for the particular location of this star and q is
# our 2d array of fitting parameters. There is a row in q for each element of p,
# and a column in q for each element in K.
#
# Each star then gives us nparam equations, which become rows of our full design matrix.
# Consider the first equation for one star:
#
# A[0,:] p = b[0]
# A[0,:] q K = b[0]
#
# Each element in the A[0,:]T KT outer product is the coefficient of a single element q_mn.
# So we can rewrite this as
#
# (A[0,:,np.newaxis] * K[np.newaxis,:]).flatten() * q.flatten() = b[0]
#
# Thus, the equation we want for our big design matrix has rows with
#
# (A_i[j,:,np.newaxis] * K_i[np.newaxis,:]).flatten()
#
# and the b vector has elements
#
# b_i[j]
#
# Now, the typical usage of this class is such that the size of A is
#
# (nstars * npixels x nparam * nbasis)
#
# Typical numbers are:
#
# nstars ~ 100
# npixels ~ 500
# nparam ~ 400
# nbasis ~ 6
#
# So the size of A is ~ 50,000 x 2,400, which for double precision is about 1 GB.
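        # (Checking that estimate: 50,000 x 2,400 doubles x 8 bytes ~ 0.96e9 bytes.)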
# Considering that a particular CCD in a dense field might have up to 10 times this many
# stars, this is a very steep memory demand for this function.
#
# Therefore, we make the default behavior be to construct AT A and solve AT A dq = AT b.
# But we have an option to use QR decomposition of A instead, which may have stability
# advantages, since the condition of AT A is the square of the condition of A, so the
# QR method often has fewer numerical problems than the direct method for large matrices.
# It just requires a lot of memory.
logger = galsim.config.LoggerWrapper(logger)
if self.use_qr:
self._solve_qr(stars, logger)
else:
self._solve_direct(stars, logger)
def _solve_qr(self, stars, logger):
"""The implementation of solve() when use_qr = True.
"""
# First, build up one chunk for each star. We'll stack them later.
A_chunks = []
b_chunks = []
for s in stars:
# Get the basis function values at this star
K = self.basis(s)
b_chunks.append(s.fit.b)
A_chunks.append((s.fit.A[:,:,np.newaxis] * K[np.newaxis,:]).reshape(
s.fit.A.shape[0], s.fit.A.shape[1] * len(K)))
# Now stack the chunks into a single A and b.
A = np.vstack(A_chunks)
b = np.concatenate(b_chunks)
if A.shape[0] < A.shape[1]:
raise RuntimeError("Too few constraints for solution. (Probably too few stars)")
# Note: The following snippet is the straightforward way to do this using the
# scipy qr function. However, it generates the full Q matrix, which is slow.
# Using the lapack functions directly is slightly more obfuscated, but faster.
#Q,R = scipy.linalg.qr(A, mode='economic', overwrite_a=True)
#dq = Q.T.dot(b)
#dq = scipy.linalg.solve_triangular(R, dq, overwrite_b=True, check_finite=False)
# This computes A -> Q R, where QR are stored in a single matrix along with an
# ancillary tau matrix that helps define the Householder matrices used to build
# the real Q.
QR, tau, work, info = scipy.linalg.lapack.dgeqrf(A)
m = A.shape[1]
# Check the diagonal values of the R matrix. The following calculation isn't a
# real condition number, since the regular QR decomposition isn't actually rank
# revealing. However, we only get problems with the normal QR decomposition if
# this number is very small, in which case we should switch to a QRP decomposition.
abs_Rdiag = np.abs(np.diag(QR))
cond = np.min(abs_Rdiag) / np.max(abs_Rdiag)
if cond < 1.e-12:
# Note: this calculation is much slower, but it is safe to use even for
# singular inputs, so it will always produce a valid answer.
logger.info('Nominal condition is %s (min, max = %s, %s)', cond,
np.min(abs_Rdiag), np.max(abs_Rdiag))
logger.info('Switching to QRP solution')
QR, P, tau, work, info = scipy.linalg.lapack.dgeqp3(A, overwrite_a=True)
P[:] -= 1 # Switch to python 0-based indexing.
abs_Rdiag = np.abs(np.diag(QR))
cond = np.min(abs_Rdiag) / np.max(abs_Rdiag)
logger.info('Condition for QRP is %s (min, max = %s, %s)', cond,
np.min(abs_Rdiag), np.max(abs_Rdiag))
# Skip any rows of R that have essentially 0 on the diagonal.
k = np.sum(abs_Rdiag > 1.e-15 * np.max(abs_Rdiag))
logger.debug('k = %d, m = %d',k,m)
else:
P = None
k = m
# The next steps are the same regardless of whether we pivoted or not.
# This computes y = Q.T b
dq, work, info = scipy.linalg.lapack.dormqr('L', 'T', QR, tau, b, work)
# Cut dq down to the first m elements, since it is still the size of b here.
dq = dq[:m]
# Solve R dq = y (in place)
scipy.linalg.lapack.dtrtrs(QR[:k,:k], dq[:k], overwrite_b=True)
if P is not None:
            # Apply the permutation if we have one.
            dq1 = dq
            dq1[k:m] = 0.
            dq = np.empty(m)
            # Assumed completion (the source is truncated here): undo the column
            # pivoting so dq is ordered like the original columns of A.
            dq[P] = dq1
from __future__ import division # Python 2 users only
from __future__ import print_function
__doc__ = """ numpy utility functions used for tSNE modelling"""
import numpy as np
def Hbeta_vec(distances, betas):
"""
Function that computes the Gaussian kernel values given a vector of
squared Euclidean distances, and the precision of the Gaussian kernel.
The function also computes the perplexity of the distribution.
From Parametric t-SNE for matlab at https://lvdmaaten.github.io/tsne/
Parameters
----------
distances: 2-d array_like, (N,N)
Square matrix of distances between data points
betas: 1-d array_like, (N,)
Vector of precisions of the Gaussian kernel. beta = (2 sigma**2)^-1
Returns
-------
H: 1-d array_like, (N,)
Entropy of each point
p_matr: 2-d array_like, (N,N)
array of probability values
The scalar formula for p_matr is:
p_matr = np.exp(-D * beta) / sum(np.exp(-D * beta))
    This function is vectorized and calculates the full P matrix
"""
beta_matr = (betas[:,np.newaxis] * np.ones_like(distances))
p_matr = np.exp(-distances * beta_matr)
sumP = np.sum(p_matr, axis=1)
H = np.log(sumP) + (betas * np.sum(distances * p_matr, axis=1)) / sumP
p_matr = p_matr / (sumP[:,np.newaxis]*np.ones_like(p_matr))
return H, p_matr
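def _example_hbeta_vec():
    # Usage sketch added for illustration (not part of the original module):
    # entropies and row-normalized affinities for a toy 3-point configuration.
    X = np.array([[0.0], [1.0], [3.0]])
    D = (X - X.T) ** 2                       # squared Euclidean distances
    H, P = Hbeta_vec(D, np.ones(3))          # unit precisions
    assert np.allclose(P.sum(axis=1), 1.0)   # each row is a probability vector
    return H, P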
def Hbeta_scalar(distances, beta):
"""
Function that computes the Gaussian kernel values given a vector of
squared Euclidean distances, and the precision of the Gaussian kernel.
The function also computes the perplexity of the distribution.
From Parametric t-SNE for matlab at https://lvdmaaten.github.io/tsne/
Parameters
----------
distances: 1-d array_like, (N,)
Distance between the current data point and all others
beta: float
Precision of the Gaussian kernel. beta = (2 sigma**2)^-1
Returns
-------
H: float
Entropy
p_matr: 1-d array_like, (N,)
array of probability values
p_matr = np.exp(-D * beta) / sum(np.exp(-D * beta))
"""
p_matr = np.exp(-distances * beta)
sumP = np.sum(p_matr)
    H = np.log(sumP) + beta * np.sum(distances * p_matr) / sumP
    # Assumed completion (the source is truncated here), mirroring the
    # vectorized version above: normalize to a probability vector.
    p_matr = p_matr / sumP
    return H, p_matr
import numpy as np
from sklearn.model_selection import train_test_split
import catboost
class CatBoostClassifier:
def __init__(self, **params):
"""
Catboost Classifier wrapper
"""
default = {'verbose': 0, 'n_estimators': 1000, 'allow_writing_files': False}
for k, v in default.items():
if k not in params.keys():
params[k] = v
self.default = default
self.params = params
self.model = catboost.CatBoostClassifier(**params)
self.hasPredictProba = True
self.classes_ = None
self.trained = False
self.callbacks = None
self.verbose = 0
self.early_stopping_rounds = 100
if 'early_stopping_rounds' in params.keys():
self.early_stopping_rounds = params.pop('early_stopping_rounds')
if 'verbose' in params.keys():
self.verbose = params.pop('verbose')
self.set_params(**params)
self._estimator_type = 'classifier'
def set_params(self, **params):
for k, v in self.default.items():
if k not in params.keys():
params[k] = v
self.model.set_params(**params)
return self
def get_params(self, **args):
return self.model.get_params(**args)
def fit(self, X, y):
# Split data
train_x, test_x, train_y, test_y = train_test_split(X, y, stratify=y, test_size=0.1)
# Set Attributes
        self.classes_ = np.unique(y)
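        # Assumed completion (the source is truncated here): fit with the
        # held-out split for early stopping. A sketch, not the original code.
        self.model.fit(train_x, train_y,
                       eval_set=(test_x, test_y),
                       early_stopping_rounds=self.early_stopping_rounds,
                       verbose=self.verbose)
        self.trained = True
        return self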
"""
Parallax fitting and computation of distances
"""
import os
import warnings
import collections
from bisect import bisect_left
import h5py
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d
from astropy.coordinates import SkyCoord
from healpy import ang2pix
from dustmaps.sfd import SFDQuery
from dustmaps.bayestar import BayestarWebQuery
from astropy.utils.exceptions import AstropyWarning
import basta.utils_distances as udist
import basta.constants as cnsts
import basta.stats as stats
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Don't print Astropy warnings (catch error caused by mock'ing astropy in Sphinx)
try:
warnings.filterwarnings("ignore", category=AstropyWarning)
except AssertionError:
pass
try:
from basta._dustpath import __dustpath__
except ModuleNotFoundError:
print("\nCannot find path to dustmaps. Did you run 'setup.py'?\n")
raise
def LOS_reddening(distanceparams):
"""
Returns color excess E(B-V) for a line of sight using a
pre-downloaded 3D extinction map provided by Green et al. 2015/2018 - see
http://argonaut.skymaps.info/.
The extinction map is only computed for distance modulus between
:math:`4 < m-M < 19` in units of magnitude.
Parameters
----------
distanceparams : dictionary
Dictionary with distance parameters
Returns
-------
EBV : function
excess color function
"""
if "EBV" in distanceparams:
return lambda x: np.asarray(
np.random.normal(
distanceparams["EBV"][1],
distanceparams["EBV"][2] - distanceparams["EBV"][1],
size=[
                    len(i) if isinstance(i, collections.abc.Iterable) else 1 for i in [x]
][0],
)
)
frame = distanceparams["dustframe"]
# Convert to galactic coordinates
if frame == "icrs":
ra = distanceparams["RA"]
dec = distanceparams["DEC"]
c = SkyCoord(ra=ra, dec=dec, frame="icrs", unit="deg")
elif frame == "galactic":
lon = distanceparams["lon"]
lat = distanceparams["lat"]
c = SkyCoord(l=lon, b=lat, frame="galactic", unit="deg")
else:
raise ValueError("Unknown dust map frame for computing reddening!")
# Load extinction data cube
pathmap = os.path.join(__dustpath__, "bayestar/bayestar2019.h5")
dcube = h5py.File(pathmap, "r")
# Distance modulus bins
bin_edges = dcube["/pixel_info"].attrs["DM_bin_edges"]
dmbin = bin_edges + (bin_edges[1] - bin_edges[0]) / 2.0
# If webquery fails use local copy of dustmap
try:
bayestar = BayestarWebQuery(version="bayestar2019")
Egr_samples = bayestar(c, mode="samples")
except Exception:
# contains positional info
pinfo = dcube["/pixel_info"][:]
nsides = np.unique(dcube["/pixel_info"][:]["nside"])
# Convert coordinates to galactic frame
lon = c.galactic.l.deg
lat = c.galactic.b.deg
# Convert l,b[deg] to theta,phi[rad]
theta = np.pi / 2.0 - lat * np.pi / 180.0
phi = lon * np.pi / 180.0
# To check if we are within the maps coordinates
Egr_samples = np.array([np.nan])
# Run through nsides
for ncont in reversed(nsides):
healpixNside = ang2pix(ncont, theta, phi, nest=True)
indNside = np.where(np.asarray([x[0] for x in pinfo]) == ncont)[0]
dcubepixNside = [x[1] for x in pinfo[indNside]]
kNside = int(bisect_left(dcubepixNside, healpixNside)) + indNside[0]
if healpixNside == dcubepixNside[kNside - indNside[0]]:
index = kNside
Egr_samples = dcube["/samples"][index]
break
        # If coordinates outside dust map, use Schlegel
if np.isnan(Egr_samples).any():
print("WARNING: Coordinates outside dust map boundaries!")
print("Default to Schegel 1998 dust map")
sfd = SFDQuery()
EBV_fun = lambda x: np.full_like(x, sfd(c))
return EBV_fun
Egr_med, Egr_err = [], []
for i in range(len(dmbin)):
Egr_med.append(np.nanmedian(Egr_samples[:, i]))
Egr_err.append(np.nanstd(Egr_samples[:, i]))
Egr_med_fun = interp1d(
dmbin, Egr_med, bounds_error=False, fill_value=(0, np.max(Egr_med))
)
Egr_err_fun = interp1d(
dmbin, Egr_err, bounds_error=False, fill_value=np.max(Egr_err)
)
dcube.close()
def EBV_fun(dm):
Egr = np.asarray(np.random.normal(Egr_med_fun(dm), Egr_err_fun(dm)))
EBV = cnsts.extinction.Conv_Bayestar * Egr
return EBV
return EBV_fun
def get_EBV(dist, LOS_EBV, debug=False, outfilename=""):
"""
Estimate E(B-V) by drawing distances from a normal parallax
distribution with EDSD prior.
Parameters
-----
dist : array
The drawn distances
LOS_EBV : func
EBV function.
debug : bool, optional
Debug flag.
If True, this function outputs two plots, one of distance modulus
vs. E(B-V) and a histogram of the E(B-V).
outfilename : str, optional
Name of directory of where to put plots outputted if debug is True.
Returns
-------
EBVs : array
E(B-V) at distances
"""
dmod = 5 * np.log10(dist / 10)
EBVs = LOS_EBV(dmod)
if debug:
plt.figure()
plt.plot(dmod, EBVs, ".")
plt.xlabel("dmod")
plt.ylabel("E(B-V)")
plt.savefig(outfilename + "_DEBUG_dmod_EBVs.png")
plt.close()
return EBVs
def get_absorption(EBV, fitparams, filt):
"""
Compute extinction coefficient Rzeta for band zeta.
Using parameterized law from Casagrande & VandenBerg 2014.
Valid for:
logg = 4.1
Teff = 5250 - 7000K
Fe/H = -2.0 - 0.25
a/Fe = -0.4 - 0.4
Assume nominal reddening law with RV=3.1. In a band zeta, Azeta = Rzeta*E(B-V).
Parameters
----------
EBV : array
E(B-V) values
fitparams : dict
The fitting params in inputparams.
filt : str
Name of the given filter
Returns
-------
R*EBV : array
Extinction coefficient times E(B-V)
"""
N = len(EBV)
table = cnsts.extinction.R
i_filter = table["Filter"] == filt
if not any(i_filter) or table["RZ_mean"][i_filter] == 0:
print("WARNING: Unknown extinction coefficient for filter: " + filt)
print(" Using reddening law coefficient R = 0.")
return np.zeros(N)
metal = "MeH" if "MeH" in fitparams else "FeH"
if "Teff" not in fitparams or metal not in fitparams:
R = np.ones_like(EBV) * table["RZ_mean"][i_filter].item()
else:
Teff_val, Teff_err = fitparams["Teff"]
metal_val, metal_err = fitparams[metal]
Teff = np.random.normal(Teff_val, Teff_err, size=N)
FeH = np.random.normal(metal_val, metal_err, size=N)
a0 = table["a0"][i_filter].item()
a1 = table["a1"][i_filter].item()
a2 = table["a2"][i_filter].item()
a3 = table["a3"][i_filter].item()
T4 = 1e-4 * Teff
R = a0 + T4 * (a1 + a2 * T4) + a3 * FeH
return R * EBV
def add_absolute_magnitudes(
inputparams, n=1000, k=1000, outfilename="", debug=False, use_gaussian_priors=False
):
"""
Convert apparent magnitudes to absolute magnitudes using the distance
Extinction E(B-V) is estimated based on Green et al. (2015) dust map.
Extinction is converted to reddening using Casagrande & VandenBerg 2014.
The converted colors and magnitudes are added to fitsparams.
Parameters
----------
inputparams : dict
Inputparams used in BASTA run.
n : int
Number of samples from parallax range
k : int
Number of samples from apparent magnitude range.
outfilename : str, optional
Name of directory of where to put plots outputted if debug is True.
debug : bool, optional
Debug flag. If True, debugging plots will be outputted.
use_gaussian_priors : bool, optional
If True, gaussian priors will be used for apparent magnitude in
the distance computation.
Returns
-------
inputparams : dict
Modified version of inputparams including absolute magnitudes.
"""
if "parallax" not in inputparams["fitparams"]:
return inputparams
print("\nPreparing distance/parallax/magnitude input ...", flush=True)
qs = [0.158655, 0.5, 0.841345]
fitparams = inputparams["fitparams"]
distanceparams = inputparams["distanceparams"]
if use_gaussian_priors:
inputparams["fitparams"][filt] = [val, std]
return inputparams
# Get apparent magnitudes from input data
mobs = distanceparams["m"]
mobs_err = distanceparams["m_err"]
if len(mobs.keys()) == 0:
raise ValueError("No filters were given")
# Convert the inputted parallax in mas to as
plxobs = fitparams["parallax"][0] * 1e-3
plxobs_err = fitparams["parallax"][1] * 1e-3
L = udist.EDSD(None, None) * 1e3
fitparams.pop("parallax")
# Sample distances more densely around the mode of the distance distribution
# See Bailer-Jones 2015, Eq 19
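    # i.e. the mode r solves r**3/L - 2*r**2 + (plxobs/plxobs_err**2)*r - 1/plxobs_err**2 = 0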
coeffs = [1 / L, -2, plxobs / (plxobs_err**2), -1 / (plxobs_err**2)]
roots = np.roots(coeffs)
if np.sum((np.isreal(roots))) == 1:
(mode,) = np.real(roots[np.isreal(roots)])
else:
assert np.sum((np.isreal(roots))) == 3
if plxobs >= 0:
mode = np.amin(np.real(roots[np.isreal(roots)]))
else:
(mode,) = np.real(roots[roots > 0])
    # By sampling linearly in quantiles, the probability mass is equal for the samples
bla = scipy.stats.norm.cdf(0, loc=mode, scale=1000) + 0.01
dist = scipy.stats.norm.ppf(
np.linspace(bla, 0.96, n - n // 2), loc=mode, scale=1000
)
# We also want to sample across the entire range.
lindist = 10 ** np.linspace(-0.4, 4.4, n // 2)
assert np.all(np.isfinite(dist))
assert np.all(dist > 0)
dist = np.concatenate([dist, lindist])
dist = np.sort(dist)
lldist = udist.compute_distlikelihoods(
dist, plxobs, plxobs_err, L, outfilename=outfilename, debug=debug
)
dists = np.repeat(dist, k)
lldists = np.repeat(lldist, k)
# Get EBV values
LOS_EBV = LOS_reddening(distanceparams)
EBV = get_EBV(dist, LOS_EBV, debug=debug, outfilename=outfilename)
EBVs = np.repeat(EBV, k)
distanceparams["As"] = {}
llabsms_joined = np.zeros(n * k)
for filt in mobs.keys():
# Sample apparent magnitudes over the entire parameter range
if filt in cnsts.distanceranges.filters:
m = np.linspace(
cnsts.distanceranges.filters[filt]["min"],
cnsts.distanceranges.filters[filt]["max"],
k - k // 2,
)
else:
m = np.linspace(-10, 25, k - k // 2)
m = np.concatenate(
[
m,
scipy.stats.norm.ppf(
np.linspace(0.04, 0.96, k // 2),
loc=mobs[filt],
scale=mobs_err[filt],
),
]
)
m = np.sort(m)
llm = udist.compute_mslikelihoods(m, mobs[filt], mobs_err[filt])
ms = np.tile(m, n)
llms = np.tile(llm, n)
assert len(dists) == len(ms) == n * k
A = get_absorption(EBV, fitparams, filt)
As = np.repeat(A, k)
absms = udist.compute_absmag(ms, dists, As)
# Construct likelihood distribution
llabsms = llms + lldists
llabsms_joined += llabsms
        llabsms -= np.amax(llabsms)
#Differential Photometry script written in April 2019 by SKB, MP, KWD for WIYN 0.9m HDI data
#This script calculates photometry and differential photometry for all stars in an image and takes target positions to pull out differential photometry of target stars. Auto calculates comparison stars based on lowest percentile of variability of stars in the image.
# Script is run through a shell jupyter notebook script.
#Initially created by <NAME> as a juypter notebook 2018
#Turned into modular form by <NAME> April 2019
#Modified by <NAME>, <NAME>, <NAME> April 2019
# python 2/3 compatibility
from __future__ import print_function
# numerical python
import numpy as np
# file management tools
import glob
import os
# good module for timing tests
import time
# plotting stuff
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# ability to read/write fits files
from astropy.io import fits
# fancy image combination technique
from astropy.stats import sigma_clip
# median absolute deviation: for photometry
from astropy.stats import mad_std
# photometric utilities
from photutils import DAOStarFinder,aperture_photometry, CircularAperture, CircularAnnulus, Background2D, MedianBackground
# periodograms
from astropy.stats import LombScargle
from regions import read_ds9, write_ds9
from astropy.wcs import WCS
import warnings
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.visualization import ZScaleInterval
import numpy.ma as ma
warnings.filterwarnings("ignore")
np.set_printoptions(suppress=True)
def construct_astrometry(hdr_wcs):
'''
construct_astrometry
make the pixel to RA/Dec conversion (and back) from the header of an astrometry.net return
inputs
------------------------------
hdr_wcs : header with astrometry information, typically from astrometry.net
returns
------------------------------
w : the WCS instance
'''
# initialize the World Coordinate System
w = WCS(naxis=2)
# specify the pixel to RA/Dec conversion
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.cd = np.array([[hdr_wcs['CD1_1'],hdr_wcs['CD1_2']],[hdr_wcs['CD2_1'],hdr_wcs['CD2_2']]])
w.wcs.crpix = [hdr_wcs['CRPIX1'], hdr_wcs['CRPIX2']]
w.wcs.crval = [hdr_wcs['CRVAL1'],hdr_wcs['CRVAL2']]
w.wcs.cunit = [hdr_wcs['CUNIT1'],hdr_wcs['CUNIT2']]
w.wcs.latpole = hdr_wcs['LATPOLE']
#w.wcs.lonpole = hdr_wcs['LONPOLE']
w.wcs.theta0 = hdr_wcs['LONPOLE']
w.wcs.equinox = hdr_wcs['EQUINOX']
# calculate the RA/Dec to pixel conversion
w.wcs.fix()
w.wcs.cdfix()
w.wcs.set()
# return the instance
return w
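# Usage sketch added for illustration (not part of the original script);
# hdr_wcs is any astrometry.net-style header accepted above.
def _example_wcs(hdr_wcs):
    w = construct_astrometry(hdr_wcs)
    ra, dec = w.wcs_pix2world([[512.0, 512.0]], 1)[0]  # pixel -> sky (deg)
    return ra, dec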
def StarFind(imname, FWHM, nsigma):
'''
StarFind
find all stars in a .fits image
inputs
----------
imname: name of .fits image to open.
FWHM: fwhm of stars in field
nsigma: number of sigma above background above which to select sources. (~3 to 4 is a good estimate)
outputs
--------
xpos: x positions of sources
ypos: y positions of sources
nstars: number of stars found in image
'''
#open image
im,hdr=fits.getdata(imname, header=True)
im = np.array(im).astype('float')
#determine background
bkg_sigma = mad_std(im)
print('begin: DAOStarFinder')
daofind = DAOStarFinder(fwhm=FWHM, threshold=nsigma*bkg_sigma, exclude_border=True)
sources = daofind(im)
#x and y positions
xpos = sources['xcentroid']
ypos = sources['ycentroid']
#number of stars found
nstars = len(xpos)
print('found ' + str(nstars) + ' stars')
return xpos, ypos, nstars
def makeApertures(xpos, ypos, aprad,skybuff, skywidth):
'''
makeApertures
makes a master list of apertures and the annuli
inputs
---------
xpos: list - x positions of stars in image
ypos: list - y positions of stars in image
aprad: float - aperture radius
skybuff: float - sky annulus inner radius
skywidth: float - sky annulus outer radius
outputs
--------
apertures: list - list of aperture positions and radius
annulus_apertures: list - list of annuli positions and radius
see: https://photutils.readthedocs.io/en/stable/api/photutils.CircularAperture.html#photutils.CircularAperture
for more details
'''
# make the master list of apertures
apertures = CircularAperture((xpos, ypos), r=aprad)
annulus_apertures = CircularAnnulus((xpos, ypos), r_in=aprad+skybuff, r_out=aprad+skybuff+skywidth)
apers = [apertures, annulus_apertures]
return apertures, annulus_apertures
def apertureArea(apertures):
''' returns the area of the aperture'''
return apertures.area() ### should be apertures
def backgroundArea(back_aperture):
'''returns the area of the annuli'''
return back_aperture.area() ### should be annulus_apertures
def doPhotometry(imglist, xpos, ypos, aprad, skybuff, skywidth,timekey='MJD-OBS',verbose=1):
'''
    doPhotometry
determine the flux for each star from aperture photometry
inputs
-------
imglist: list - list of .fits images
xpos, ypos: lists - lists of x and y positions of stars
aprad, skybuff, skywidth: floats - aperture, sky annuli inner, sky annuli outer radii
outputs
-------
Times: list - time stamps of each observation from the .fits header
Photometry: list - aperture photometry flux values found at each xpos, ypos position
'''
#number of images
nimages = len(imglist)
nstars = len(xpos)
print('Found {} images'.format(nimages))
#create lists for timestamps and flux values
Times = np.zeros(nimages)
Photometry = np.zeros((nimages,nstars))
print('making apertures')
#make the apertures around each star
apertures, annulus_apertures = makeApertures(xpos, ypos, aprad, skybuff, skywidth)
#plot apertures
plt.figure(figsize=(12,12))
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(fits.getdata(imglist[0]))
plt.imshow(fits.getdata(imglist[0]), vmin=vmin,vmax=vmax, origin='lower')
apertures.plot(color='white', lw=2)
#annulus_apertures.plot(color='red', lw=2)
plt.title('apertures')
plt.show()
#determine area of apertures
area_of_ap = apertureArea(apertures)
#determine area of annuli
area_of_background = backgroundArea(annulus_apertures)
checknum = np.linspace(0,nimages,10).astype(int)
#go through each image and run aperture photometry
for ind in np.arange(nimages):
if ((ind in checknum) & (verbose==1)):
print('running aperture photometry on image: ', ind )
if (verbose>1):
print('running aperture photometry on image: ', ind )
#open image
data_image, hdr = fits.getdata(imglist[ind], header=True)
#find time stamp and append to list
Times[ind] = hdr[timekey]
#do photometry
phot_table = aperture_photometry(data_image, (apertures,annulus_apertures))
#determine flux: (aperture flux) - [(area of aperture * annuli flux)/area of background ]
flux0 = np.array(phot_table['aperture_sum_0']) - (area_of_ap/area_of_background)*np.array(phot_table['aperture_sum_1'])
#append to list
Photometry[ind,:] = flux0
return Times,Photometry
def doPhotometryError(imglist,xpos, ypos,aprad, skybuff, skywidth, flux0, GAIN=1.3, manual = False, **kwargs):
'''
doPhotometryError
determine error in photometry from background noise
two options:
- use sigma clipping and use whole background
- manually input background box positions as kwargs
inputs
--------
imglist: list - list of .fits images
xpos, ypos: lists - lists of x and y positions of stars
aprad, skybuff, skywidth: floats - aperture, sky annuli inner, sky annuli outer radii
flux0: list - aperture photometry found from doPhotometry() function
GAIN: float - average gain
manual: boolean - switch between manually inputting box (True) or using sigma clipping (False)
if True -- must have kwargs
manual = False is default
**kwargs
kwargs[xboxcorner]: float - x edge of box in pixel coords
kwargs[yboxcorner]: float - y edge of box in pixel coords
kwargs[boxsize]: float - size of box in pixel coords
'''
# find number of images in list
nimages = len(imglist)
nstars = len(xpos)
print('Found {} images'.format(nimages))
#make apertures
apertures, annulus_apertures = makeApertures(xpos, ypos, aprad, skybuff, skywidth)
#find areas of apertures and annuli
area_of_ap = apertureArea(apertures)
area_of_background = backgroundArea(annulus_apertures)
checknum = np.linspace(0,nimages,10).astype(int)
#find error in photometry
ePhotometry = np.zeros((nimages,nstars))
for ind in np.arange(nimages):
#open images
im = fits.getdata(imglist[ind])
if ind in checknum:
print('running error analysis on image ', ind)
#determine variance in background
if manual == True: #manual method -- choose back size
skyvar = np.std(im[kwargs['xboxcorner']:kwargs['xboxcorner']+kwargs['boxsize'],kwargs['yboxcorner']:kwargs['yboxcorner']+kwargs['boxsize']])**2.
err1 = skyvar*(area_of_ap)**2./(kwargs['boxsize']*kwargs['boxsize']) # uncertainty in mean sky brightness
if manual == False: #automatic method -- use sigma clipping
filtered_data = sigma_clip(im, sigma=3)
skyvar = np.std(filtered_data)**2.
            # Assumed completion of a truncated line: divide by the number of
            # pixels in the frame (cf. boxsize*boxsize in the manual branch).
            err1 = skyvar*(area_of_ap)**2./(np.shape(im[0])[0]*np.shape(im)[0])
import numpy as np
import os
import sample_loader
import pathlib
import glob
import re
import sys
sys.path.append("../base/CropSample")
sys.path.append("../base/IntegrateColumnIntoGrid")
import CropSample
import IntegrateColumnIntoGrid
from scipy import ndimage
import JSONHelper
def crop_from_volume(s0, x, y, z):
sdf = np.array([], dtype=np.float32)
    pdf = np.array([], dtype=np.float32)
from luminaire.model.base_model import BaseModel, BaseModelObject, BaseModelHyperParams
from luminaire.exploration.data_exploration import DataExploration
from luminaire.model.model_utils import LADHolidays
from typing import Dict, Tuple
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
class LADStructuralHyperParams(BaseModelHyperParams):
"""
Exception class for Luminaire structural anomaly detection model.
:param bool include_holidays_exog: whether to include holidays as exogenous variables in the regression.
Holidays are defined in :class:`~model.model_utils.LADHolidays`
:param int p: Order for the AR component of the model.
:param int q: Order for the MA component of the model.
:param bool is_log_transformed: A flag to specify whether to take a log transform of the input data. If the data
contain negatives, is_log_transformed is ignored even though it is set to True.
:param int max_ft_freq: The maximum frequency order for the Fourier transformation.
"""
def __init__(self, include_holidays_exog=True,
p=2,
q=2,
is_log_transformed=True,
max_ft_freq=3):
super(LADStructuralHyperParams, self).__init__(
model_name="LADStructuralModel",
include_holidays_exog=include_holidays_exog,
p=p,
q=q,
is_log_transformed=is_log_transformed,
max_ft_freq=max_ft_freq,
)
class LADStructuralError(Exception):
"""
Exception class for Luminaire structural anomaly detection model.
"""
def __init__(self, message):
message = f'LAD structural failed! Error: {message}'
super(LADStructuralError, self).__init__(message)
class LADStructuralModel(BaseModel):
"""
A LAD structural time series model.
:param dict hyper_params: Hyper parameters for Luminaire structural modeling.
See :class:`luminaire.model.lad_structural.LADStructuralHyperParams` for detailed information.
:param str freq: The frequency of the time-series. A `Pandas offset`_ such as 'D', 'H', or 'M'. Luminaire currently
supports the following pandas frequency types: 'H', 'D', 'W', 'W-SUN', 'W-MON', 'W-TUE', 'W-WED', 'W-THU',
'W-FRI', 'W-SAT'.
:param int min_ts_length: The minimum required length of the time series for training.
:param int max_ts_length: The maximum required length of the time series for training. The input time series will be
truncated if the length is greater than this value.
:param float min_ts_mean: Minimum average values in the most recent window of the time series. This optional
parameter can be used to avoid over-alerting from noisy low volume time series.
:param int min_ts_mean_window: Size of the most recent window to calculate min_ts_mean.
.. Note :: This class should be used to manually configure the structural model. Exact configuration parameters
can be found in `luminaire.model.lad_structural.LADStructuralHyperParams`. Optimal configuration can be
obtained by using Luminaire hyperparameter optimization.
.. _statsmodels docs: http://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_model.ARIMA.html
.. _Pandas offset: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
>>> hyper = {"include_holidays_exog": 0, "is_log_transformed": 1, "max_ft_freq": 2, "p": 5, "q": 1}
lad_struct_model = LADStructuralModel(hyper_params=hyper, freq='D')
>>> lad_struct_model
<luminaire.model.lad_structural.LADStructuralModel object at 0x103efe320>
"""
__version__ = "2.0"
_target_metric = 'raw'
_imputed_metric = 'interpolated'
_sig_level = 0.10
_sig_level_extreme = 0.001
def __init__(self,
hyper_params: LADStructuralHyperParams().params or None,
freq,
min_ts_length=None,
max_ts_length=None,
min_ts_mean=None,
min_ts_mean_window=None,
**kwargs):
self.hyper_params = hyper_params
max_scoring_length_dict = {
'H': 48,
'D': 10,
'W': 8, 'W-SUN': 8, 'W-MON': 8, 'W-TUE': 8, 'W-WED': 8, 'W-THU': 8, 'W-FRI': 8, 'W-SAT': 8,
'M': 24, 'MS': 24,
}
self.max_scoring_length = max_scoring_length_dict.get(freq)
fit_diagnostic_lag_dict = {
'H': 144 * 2,
'D': 7 * 4,
'W': 12, 'W-SUN': 12, 'W-MON': 12, 'W-TUE': 12, 'W-WED': 12, 'W-THU': 12, 'W-FRI': 12, 'W-SAT': 12,
'M': 24, 'MS': 24,
}
self._fit_diagnostic_lag = fit_diagnostic_lag_dict.get(freq)
super(LADStructuralModel, self).__init__(freq=freq, min_ts_mean=min_ts_mean,
min_ts_mean_window=min_ts_mean_window,
min_ts_length=min_ts_length, max_ts_length=max_ts_length,
**hyper_params, **kwargs)
@classmethod
def _signals(cls, idx, m, n):
"""
This function computes the sinusoids given the significant frequencies
:param list idx: A list containing the significant frequency indices obtained from the spectral density plot in fourier_extp()
:param int m: Specifying the current frequency
:param int n: Specifying the length of the time series
:return: A numpy array containing the sinusoids corresponding to the significant frequencies
"""
import numpy as np
signal = []
# Generating all the frequencies from a time series of length n
        fs = np.fft.fftfreq(n)
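        # Assumed completion (the source is truncated here): build sine/cosine
        # pairs for the significant frequencies over a length-n time grid.
        # A sketch guided by the docstring, not the original code.
        t = np.arange(n)
        for i in idx[:m]:
            signal.append(np.sin(2 * np.pi * fs[i] * t))
            signal.append(np.cos(2 * np.pi * fs[i] * t))
        return np.array(signal)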
import numpy as np
import pytest
import torch
from probflow.distributions import Bernoulli
from probflow.models import Model
from probflow.parameters import CenteredParameter
def is_close(a, b, tol=1e-5):
return np.abs(a - b) < tol
def test_CenteredParameter_all_1d():
"""Tests probflow.parameters.CenteredParameter w/ center_by=all + 1D"""
# Create the parameter
param = CenteredParameter(5)
# posterior_mean should return mean
sample1 = param.posterior_mean()
sample2 = param.posterior_mean()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample2.shape[0] == 5
assert sample1.shape[1] == 1
assert sample2.shape[1] == 1
assert np.all(sample1 == sample2)
# mean should be 0! (that's the point of a centered parameter!)
assert is_close(np.mean(sample1), 0)
# posterior_sample should return samples
sample1 = param.posterior_sample()
sample2 = param.posterior_sample()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample1.shape[1] == 1
assert sample2.shape[0] == 5
assert sample2.shape[1] == 1
assert np.all(sample1 != sample2)
# mean should be 0! (that's the point of a centered parameter!)
assert is_close(np.mean(sample1), 0)
# posterior_sample should be able to return multiple samples
sample1 = param.posterior_sample(10)
sample2 = param.posterior_sample(10)
assert sample1.ndim == 3
assert sample2.ndim == 3
assert sample1.shape[0] == 10
assert sample1.shape[1] == 5
assert sample1.shape[2] == 1
assert sample2.shape[0] == 10
assert sample2.shape[1] == 5
assert sample2.shape[2] == 1
assert np.all(sample1 != sample2)
# mean should be 0!
assert np.all(np.abs(np.mean(sample1, axis=1)) < 1e-5)
def test_CenteredParameter_all_2d():
"""Tests probflow.parameters.CenteredParameter w/ center_by=all + 2D"""
# Shouldn't allow >2 dims
with pytest.raises(ValueError):
param = CenteredParameter([5, 6, 7])
# Create the parameter
param = CenteredParameter([5, 6])
# posterior_mean should return mean
sample1 = param.posterior_mean()
sample2 = param.posterior_mean()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample2.shape[0] == 5
assert sample1.shape[1] == 6
assert sample2.shape[1] == 6
assert np.all(sample1 == sample2)
# mean should be 0! (that's the point of a centered parameter!)
assert is_close(np.mean(sample1), 0)
# posterior_sample should return samples
sample1 = param.posterior_sample()
sample2 = param.posterior_sample()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample1.shape[1] == 6
assert sample2.shape[0] == 5
assert sample2.shape[1] == 6
assert np.all(sample1 != sample2)
# mean should be 0!
assert is_close(np.mean(sample1), 0)
# posterior_sample should be able to return multiple samples
sample1 = param.posterior_sample(10)
sample2 = param.posterior_sample(10)
assert sample1.ndim == 3
assert sample2.ndim == 3
assert sample1.shape[0] == 10
assert sample1.shape[1] == 5
assert sample1.shape[2] == 6
assert sample2.shape[0] == 10
assert sample2.shape[1] == 5
assert sample2.shape[2] == 6
assert np.all(sample1 != sample2)
# mean should be 0!
assert np.all(np.abs(np.mean(sample1.reshape((10, -1)), axis=1)) < 1e-5)
def test_CenteredParameter_column():
"""Tests probflow.parameters.CenteredParameter w/ center_by=column + 2D"""
# Create the parameter
param = CenteredParameter([5, 6], center_by="column")
# posterior_mean should return mean
sample1 = param.posterior_mean()
sample2 = param.posterior_mean()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample2.shape[0] == 5
assert sample1.shape[1] == 6
assert sample2.shape[1] == 6
    assert np.all(sample1 == sample2)
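    # Assumed continuation (the source is truncated here), by analogy with the
    # center_by="all" test above: with center_by="column", each column of the
    # posterior mean should be centered at zero.
    assert np.all(np.abs(np.mean(sample1, axis=0)) < 1e-5)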
"""
Base classes and functions for experiments.
"""
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
from ipywidgets import Output
from collections import defaultdict
from tqdm import tqdm
import logging
import warnings
import time
from etn import coordinates, networks, transformers
class Model(object):
def __init__(self,
tfs=[],
coords=coordinates.identity_grid,
net=None,
equivariant=True,
downsample=1,
tf_opts={},
net_opts={},
seed=None,
load_path=None,
loglevel='INFO'):
"""
Model base class.
"""
# configure logging
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level)
logging.info(str(self))
if load_path is not None:
logging.info('Loading model from file: %s -- using saved model configuration' % load_path)
spec = torch.load(load_path)
tfs = spec['tfs']
coords = spec['coords']
net = spec['net']
equivariant = spec['equivariant']
downsample = spec['downsample']
tf_opts = spec['tf_opts']
net_opts = spec['net_opts']
seed = spec['seed']
if net is None:
raise ValueError('net parameter must be specified')
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
# build transformer sequence
if len(tfs) > 0:
pose_module = networks.EquivariantPosePredictor if equivariant else networks.DirectPosePredictor
tfs = [getattr(transformers, tf) if type(tf) is str else tf for tf in tfs]
seq = transformers.TransformerSequence(*[tf(pose_module, **tf_opts) for tf in tfs])
#seq = transformers.TransformerParallel(*[tf(pose_module, **tf_opts) for tf in tfs])
logging.info('Transformers: %s' % ' -> '.join([tf.__name__ for tf in tfs]))
logging.info('Pose module: %s' % pose_module.__name__)
else:
seq = None
# get coordinate function if given as a string
if type(coords) is str:
if hasattr(coordinates, coords):
coords = getattr(coordinates, coords)
elif hasattr(coordinates, coords + '_grid'):
coords = getattr(coordinates, coords + '_grid')
else:
raise ValueError('Invalid coordinate system: ' + coords)
logging.info('Coordinate transformation before classification: %s' % coords.__name__)
# define network
if type(net) is str:
net = getattr(networks, net)
network = net(**net_opts)
logging.info('Classifier architecture: %s' % net.__name__)
self.tfs = tfs
self.coords = coords
self.downsample = downsample
self.net = net
self.equivariant = equivariant
self.tf_opts = tf_opts
self.net_opts = net_opts
self.seed = seed
self.model = self._build_model(net=network, transformer=seq, coords=coords, downsample=downsample)
logging.info('Net opts: %s' % str(net_opts))
logging.info('Transformer opts: %s' % str(tf_opts))
if load_path is not None:
self.model.load_state_dict(spec['state_dict'])
def _build_model(self, net, transformer, coords, downsample):
return networks.TransformerCNN(
net=net,
transformer=transformer,
coords=coords,
downsample=downsample)
def _save(self, path, **kwargs):
spec = {
'tfs': [tf.__name__ for tf in self.tfs],
'coords': self.coords.__name__,
'net': self.net.__name__,
'equivariant': self.equivariant,
'downsample': self.downsample,
'tf_opts': self.tf_opts,
'net_opts': self.net_opts,
'seed': self.seed,
'state_dict': self.model.state_dict(),
}
spec.update(kwargs)
torch.save(spec, path)
def _load_dataset(self, path, num_examples=None):
# override in subclasses to handle custom preprocessing / different data formats
return Dataset(path=path, num_examples=num_examples)
def train(self,
num_epochs=300,
num_examples=None,
batch_size=128,
valid_batch_size=100,
train_path=None,
valid_path=None,
train_dataset_opts={},
valid_dataset_opts={},
optimizer='Adam',
optimizer_opts={'amsgrad': True, 'lr': 2e-3, 'weight_decay': 0.},
lr_schedule={'step_size': 1, 'gamma': 0.99},
save_path=None,
show_plot=False,
device='cuda:0'):
"""Train the model."""
if save_path is not None:
logging.info('Saving model with lowest validation error to %s' % save_path)
else:
warnings.warn('save_path not specified: model will not be saved')
# load training and validation data
if train_path is None:
raise ValueError('train_path must be specified')
if valid_path is None:
raise ValueError('valid_path must be specified')
logging.info('Loading training data from %s' % train_path)
train_loader = torch.utils.data.DataLoader(
self._load_dataset(
path=train_path,
num_examples=num_examples,
**train_dataset_opts),
shuffle=True,
batch_size=batch_size,
drop_last=True)
logging.info('Loading validation data from %s' % valid_path)
valid_loader = torch.utils.data.DataLoader(
self._load_dataset(
valid_path,
**valid_dataset_opts),
shuffle=False,
batch_size=valid_batch_size,
drop_last=False)
self.model.to(device)
optim = getattr(torch.optim, optimizer)(self.model.parameters(), **optimizer_opts)
scheduler = torch.optim.lr_scheduler.StepLR(optim, **lr_schedule)
if show_plot:
plotter = Plotter(show_plot=True)
plotter.show()
best_err = float('inf')
start_time = time.time()
for i in range(num_epochs):
# train for one epoch
logging.info('Training epoch %d' % (i+1))
train_losses = self._train(optim, scheduler, train_loader, device)
# evaluate on validation set
logging.info('Evaluating model on validation set')
valid_loss, valid_err = self._test(valid_loader, device)
logging.info('Validation loss = %.2e, validation error = %.4f' % (valid_loss, valid_err))
# save model with lowest validation error seen so far
if (save_path is not None) and (valid_err < best_err):
logging.info('Saving model with better validation error: %.2e (previously %.2e)' % (valid_err, best_err))
best_err = valid_err
self._save(save_path, epoch=i+1, valid_err=valid_err)
# update plot
if show_plot:
plotter.update(train_loss=train_losses, valid_loss=valid_loss, valid_err=valid_err)
logging.info('Finished training in %.1f s' % (time.time() - start_time))
return self
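
    # A minimal usage sketch (names and paths are illustrative, not part of
    # this module):
    #   model = Model(net='SomeClassifier', tfs=['SomeTransformer'], seed=0)
    #   model.train(train_path='data/train.pt', valid_path='data/valid.pt',
    #               save_path='checkpoints/best.pt', num_epochs=10, device='cpu')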
def test(self, batch_size=100, test_path=None, test_dataset_opts={}, device='cuda:0'):
"""Test the model."""
if test_path is None:
raise ValueError('test_path must be specified')
        logging.info('Loading test data from %s' % test_path)
loader = torch.utils.data.DataLoader(
self._load_dataset(
test_path,
**test_dataset_opts),
shuffle=False,
batch_size=batch_size,
drop_last=False)
self.model.to(device)
loss, err_rate = self._test(loader, device)
logging.info('Test loss = %.2e' % loss)
logging.info('Test error = %.4f' % err_rate)
return loss, err_rate
def predict(self, input, device='cuda:0', tf_output=False):
"""Predict a distribution over labels for a single example."""
self.model.eval()
self.model.to(device)
x = input.to(device)
if x.dim() == 3:
x = x.unsqueeze(0)
with torch.no_grad():
out = self.model(x, tf_output=tf_output)
logits = out[0] if tf_output else out
probs = F.softmax(logits.squeeze(0), dim=-1)
if tf_output:
return probs, out[1]
else:
return probs
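
    # Sketch of single-example inference (a (C, H, W) input is assumed; the
    # batch dimension is added automatically):
    #   probs = model.predict(torch.rand(1, 28, 28), device='cpu')
    #   label = probs.argmax().item()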
def _train(self, optim, scheduler, loader, device):
self.model.train()
losses = []
for x, y in tqdm(loader):
x, y = x.to(device), y.to(device)
logits = self.model(x)
loss = F.cross_entropy(logits, y)
optim.zero_grad()
loss.backward()
optim.step()
losses.append(loss.item())
scheduler.step()
return losses
def _test(self, loader, device):
self.model.eval()
total_loss = 0
total_err = 0
count = 0
for x, y in tqdm(loader):
x, y = x.to(device), y.to(device)
count += x.shape[0]
with torch.no_grad():
logits = self.model(x)
yhat = torch.argmax(logits, dim=-1)
total_err += (y != yhat).sum().item()
total_loss += F.cross_entropy(logits, y, reduction='sum').item()
loss = total_loss / count
err_rate = total_err / count
return loss, err_rate
class Dataset(torch.utils.data.Dataset):
def __init__(self, path, num_examples=None, normalization=None):
self.path = path
self.normalization = normalization
self.data, self.targets = torch.load(self.path)
if self.data.dim() == 3:
self.data = self.data.unsqueeze(1) # singleton channel dimension
        if num_examples is not None:
            self.data = self.data[:num_examples]
            self.targets = self.targets[:num_examples]
        # ensure integer class labels regardless of whether the data was sliced
        self.targets = self.targets.type(torch.long)
if normalization is not None:
mean, std = normalization
mean = torch.Tensor(mean).view(1, -1, 1, 1)
std = torch.Tensor(std).view(1, -1, 1, 1)
self.data.add_(-mean).div_(std)
def __getitem__(self, index):
return self.data[index], self.targets[index]
def __len__(self):
return len(self.data)
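
# Example (illustrative path; `normalization` is a per-channel (mean, std)
# pair -- the values below are the commonly quoted MNIST statistics):
#   ds = Dataset('data/train.pt', num_examples=1000,
#                normalization=([0.1307], [0.3081]))
#   x, y = ds[0]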
class Plotter(object):
def __init__(self, id_string='', width=12, height=2.5, show_plot=True):
"""A dynamic plotting widget for tracking training progress in notebooks."""
self.id_string = id_string
self.width = width
self.height = height
self.output = Output()
self.metrics = defaultdict(list)
self.show_plot = show_plot
def update(self, **metrics):
for k, v in metrics.items():
if type(v) is list:
self.metrics[k] += v
else:
self.metrics[k].append(v)
self.output.clear_output(wait=True)
with self.output:
if self.show_plot:
self.plot()
plt.show()
maxlen = max(map(len, self.metrics.keys()))
print(self.id_string)
for k, v in self.metrics.items():
print(('%' + str(maxlen) + 's') % k,
'| current = %.2e' % v[-1],
'| max = %.2e (iter %4d)' % (np.max(v), np.argmax(v)),
'| min = %.2e (iter %4d)' % (np.min(v), np.argmin(v)))
def show(self):
display(self.output)
def progress_string(self):
s = self.id_string + '\n'
maxlen = max(map(len, self.metrics.keys()))
for k, v in self.metrics.items():
s += ''.join([('%' + str(maxlen) + 's') % k,
'| current = %.2e' % v[-1],
                          '| max = %.2e (iter %4d)' % (np.max(v), np.argmax(v)),
                          '| min = %.2e (iter %4d)' % (np.min(v), np.argmin(v))])
            s += '\n'
        return s
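
# Usage sketch in a notebook (hypothetical metric values):
#   plotter = Plotter(id_string='run-1')
#   plotter.show()
#   plotter.update(train_loss=[0.9, 0.7], valid_loss=0.8, valid_err=0.12)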
from hazel.chromosphere import Hazel_atmosphere
from hazel.photosphere import SIR_atmosphere
from hazel.parametric import Parametric_atmosphere
from hazel.stray import Straylight_atmosphere
from hazel.configuration import Configuration
from hazel.io import Generic_output_file
from collections import OrderedDict
from hazel.codes import hazel_code, sir_code
from hazel.spectrum import Spectrum
from hazel.transforms import transformed_to_physical, physical_to_transformed, jacobian_transformation
import hazel.util
import numpy as np
import copy
import os
from pathlib import Path
import scipy.stats
import scipy.special
import scipy.signal
import scipy.linalg
import scipy.optimize
import warnings
import logging
import sys
__all__ = ['Model']
class Model(object):
def __init__(self, config=None, working_mode='synthesis', verbose=0, debug=False, rank=0, randomization=None, root=''):
np.random.seed(123)
if (rank != 0):
return
self.photospheres = []
self.chromospheres = []
self.chromospheres_order = []
self.atmospheres = {}
self.order_atmospheres = []
self.straylight = []
self.parametric = []
self.spectrum = []
self.configuration = None
self.n_cycles = 1
self.spectrum = {}
self.topologies = []
self.straylights = []
self.working_mode = working_mode
self.pixel = 0
self.debug = debug
self.use_analytical_RF_if_possible = False
self.nlte_available = False
self.use_nlte = False
self.root = root
self.epsilon = 1e-2
self.svd_tolerance = 1e-8
self.step_limiter_inversion = 1.0
self.backtracking = 'brent'
self.verbose = verbose
self.logger = logging.getLogger("model")
self.logger.setLevel(logging.DEBUG)
self.logger.handlers = []
ch = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# Set randomization
if (randomization is None):
self.n_randomization = 1
else:
self.n_randomization = randomization
if (self.verbose >= 1):
self.logger.info('Hazel2 v1.0')
if ('torch' in sys.modules and 'torch_geometric' in sys.modules):
if (self.verbose >= 1):
self.logger.info('PyTorch and PyTorch Geometric found. NLTE for Ca II is available')
self.nlte_available = True
if (config is not None):
if (self.verbose >= 1):
self.logger.info('Using configuration from file : {0}'.format(config))
self.configuration = Configuration(config)
self.use_configuration(self.configuration.config_dict)
# Initialize pyhazel
hazel_code._init()
def __getstate__(self):
d = self.__dict__.copy()
if 'logger' in d:
d['logger'] = d['logger'].name
return d
def __setstate__(self, d):
if 'logger' in d:
d['logger'] = logging.getLogger(d['logger'])
self.__dict__.update(d)
def __str__(self):
tmp = ''
for l, par in self.__dict__.items():
if (l != 'LINES'):
tmp += '{0}: {1}\n'.format(l, par)
return tmp
def use_configuration(self, config_dict):
"""
Use a configuration file
Parameters
----------
config_dict : dict
Dictionary containing all the options from the configuration file previously read
Returns
-------
None
"""
# Deal with the spectral regions
tmp = config_dict['spectral regions']
# Output file
self.output_file = config_dict['working mode']['output file']
# Backtracking mode
if ('backtracking' in config_dict['working mode']):
self.backtracking = config_dict['working mode']['backtracking']
else:
self.backtracking = 'brent'
if (self.verbose >= 1):
self.logger.info('Backtracking mode : {0}'.format(self.backtracking))
# Working mode
# self.working_mode = config_dict['working mode']['action']
# Add spectral regions
for key, value in config_dict['spectral regions'].items():
self.add_spectral(value)
# Set number of cycles if present
if (self.working_mode == 'inversion'):
if ('number of cycles' in config_dict['working mode']):
if (config_dict['working mode']['number of cycles'] != 'None'):
self.n_cycles = int(config_dict['working mode']['number of cycles'])
if (self.verbose >= 1):
self.logger.info('Using {0} cycles'.format(self.n_cycles))
# Use analytical RFs if possible
if ('analytical rf if possible' in config_dict['working mode']):
if (config_dict['working mode']['analytical rf if possible'] != 'None'):
self.use_analytical_RF_if_possible = hazel.util.tobool(config_dict['working mode']['analytical rf if possible'])
else:
self.use_analytical_RF_if_possible = False
else:
self.use_analytical_RF_if_possible = False
if (self.verbose >= 1):
self.logger.info('Using analytical RFs if possible : {0}'.format(self.use_analytical_RF_if_possible))
# Set number of maximum iterations
if ('maximum iterations' in config_dict['working mode']):
            if (config_dict['working mode']['maximum iterations'] != 'None'):
self.max_iterations = int(config_dict['working mode']['maximum iterations'])
else:
self.max_iterations = 10
else:
self.max_iterations = 10
if (self.verbose >= 1):
self.logger.info('Using {0} max. iterations'.format(self.max_iterations))
# Randomization
if (self.verbose >= 1):
if (self.n_randomization == 1):
self.logger.info('Not using randomizations')
else:
self.logger.info('Using a maximum of {0} randomizations'.format(self.n_randomization))
        # Set relative error tolerance for stopping
if ('relative error' in config_dict['working mode']):
if (config_dict['working mode']['relative error'] != 'None'):
self.relative_error = float(config_dict['working mode']['relative error'])
if (self.verbose >= 1):
self.logger.info('Stopping when relative error is below {0}'.format(self.relative_error))
else:
self.relative_error = 1e-4
else:
self.relative_error = 1e-4
# Save all cycles
if ('save all cycles' not in config_dict['working mode']):
self.save_all_cycles = False
else:
self.save_all_cycles = hazel.util.tobool(config_dict['working mode']['save all cycles'])
if (self.verbose >= 1):
self.logger.info('Saving all cycles : {0}'.format(self.save_all_cycles))
# Deal with the atmospheres
tmp = config_dict['atmospheres']
self.atmospheres = {}
if (self.verbose >= 1):
self.logger.info('Adding atmospheres')
for key, value in tmp.items():
if ('photosphere' in key):
if (self.verbose >=1):
self.logger.info(' - New available photosphere : {0}'.format(value['name']))
self.add_photosphere(value)
if ('chromosphere' in key):
if (self.verbose >= 1):
self.logger.info(' - New available chromosphere : {0}'.format(value['name']))
self.add_chromosphere(value)
if ('parametric' in key):
if (self.verbose >= 1):
self.logger.info(' - New available parametric : {0}'.format(value['name']))
self.add_parametric(value)
if ('straylight' in key):
if (self.verbose >= 1):
self.logger.info(' - New available straylight : {0}'.format(value['name']))
self.add_straylight(value)
self.setup()
def setup(self):
"""
Setup the model for synthesis/inversion. This setup includes adding the topologies, removing unused
atmospheres, reading the number of cycles for the inversion and some sanity checks
Parameters
----------
None
Returns
-------
None
"""
# Adding topologies
if (self.verbose >= 1):
self.logger.info("Adding topologies")
for value in self.topologies:
self.add_topology(value)
# Remove unused atmospheres defined in the configuration file and not in the topology
if (self.verbose >= 1):
self.logger.info("Removing unused atmospheres")
self.remove_unused_atmosphere()
# Calculate indices for atmospheres
index_chromosphere = 1
index_photosphere = 1
self.n_photospheres = 0
self.n_chromospheres = 0
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
v.index = index_photosphere
index_photosphere += 1
self.n_photospheres += 1
if (v.type == 'chromosphere'):
v.index = index_chromosphere
index_chromosphere += 1
self.n_chromospheres += 1
# Use analytical RFs if only photospheres are defined
if (self.n_chromospheres == 0 and self.use_analytical_RF_if_possible):
self.use_analytical_RF = True
if (self.verbose >= 1):
self.logger.info('Using analytical RFs : {0}'.format(self.use_analytical_RF))
else:
self.use_analytical_RF = False
# Check that number of pixels is the same for all atmospheric files if in synthesis mode
if (self.working_mode == 'synthesis'):
n_pixels = [v.n_pixel for k, v in self.atmospheres.items()]
all_equal = all(x == n_pixels[0] for x in n_pixels)
if (not all_equal):
for k, v in self.atmospheres.items():
self.logger.info('{0} -> {1}'.format(k, v.n_pixel))
raise Exception("Files with model atmospheres do not contain the same number of pixels")
else:
if (self.verbose >= 1):
self.logger.info('Number of pixels to read : {0}'.format(n_pixels[0]))
self.n_pixels = n_pixels[0]
if (self.working_mode == 'inversion'):
n_pixels = [v.n_pixel for k, v in self.spectrum.items()]
all_equal = all(x == n_pixels[0] for x in n_pixels)
if (not all_equal):
for k, v in self.spectrum.items():
self.logger.info('{0} -> {1}'.format(k, v.n_pixel))
raise Exception("Files with spectral regions do not contain the same number of pixels")
else:
if (self.verbose >= 1):
self.logger.info('Number of pixels to invert : {0}'.format(n_pixels[0]))
self.n_pixels = n_pixels[0]
# Check that the number of pixels from all observations (in case of inversion) is the same
# Check also if they are equal to those of the models
# n_pixels = [v.n_pixel for k, v in self.atmospheres.items()]
# all_equal = all(x == n_pixels[0] for x in n_pixels)
# Check that the number of cycles is the same for all atmospheres (in case of inversion)
if (self.working_mode == 'inversion'):
cycles = []
for k, v in self.atmospheres.items():
for k2, v2 in v.cycles.items():
if (v2 is not None):
cycles.append(len(v2))
all_equal = all(x == cycles[0] for x in cycles)
if (not all_equal):
raise Exception("Number of cycles in the nodes of active atmospheres is not always the same")
else:
if (self.n_cycles is None):
self.n_cycles = cycles[0]
# if (self.working_mode == 'inversion'):
# cycles = []
# for tmp in ['I', 'Q', 'U', 'V']:
# if ( cycles.append
# for k, v in self.atmospheres.items():
# for k2, v2 in v.cycles.items():
# cycles.append(len(v2))
# all_equal = all(x == cycles[0] for x in cycles)
# if (not all_equal):
# raise Exception("Number of cycles in the nodes of active atmospheres is not always the same")
# else:
# if (self.n_cycles is None):
# self.n_cycles = cycles[0]
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
self.LINES = ff.readlines()
ff.close()
self.init_sir()
for k, v in self.spectrum.items():
v.allocate_info_cycles(n_cycles=self.n_cycles)
for k, v in self.atmospheres.items():
v.allocate_info_cycles(n_cycles=self.n_cycles)
# Count total number of free parameters
if (self.working_mode == 'inversion'):
self.n_free_parameters = 0
for k, v in self.atmospheres.items():
for k2, v2 in v.cycles.items():
if (v2 is not None):
self.n_free_parameters += max(hazel.util.onlyint(v2[0:self.n_cycles+1]))
if (self.verbose >= 1):
self.logger.info('Total number of free parameters in all cycles : {0}'.format(self.n_free_parameters))
def open_output(self):
self.output_handler = Generic_output_file(self.output_file)
self.output_handler.open(self)
def close_output(self):
self.output_handler.close()
def write_output(self, randomization=0):
if (self.working_mode == 'synthesis'):
self.flatten_parameters_to_reference(cycle=0)
self.output_handler.write(self, pixel=0, randomization=randomization)
def add_spectral(self, spectral):
"""
Programmatically add a spectral region
Parameters
----------
spectral : dict
Dictionary containing the following data
'Name', 'Wavelength', 'Topology', 'Weights Stokes', 'Wavelength file', 'Wavelength weight file',
'Observations file', 'Mask file'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
value = hazel.util.lower_dict_keys(spectral)
if (self.verbose >= 1):
self.logger.info('Adding spectral region {0}'.format(value['name']))
if ('wavelength file' not in value):
value['wavelength file'] = None
elif (value['wavelength file'] == 'None'):
value['wavelength file'] = None
if ('wavelength weight file' not in value):
value['wavelength weight file'] = None
elif (value['wavelength weight file'] == 'None'):
value['wavelength weight file'] = None
if ('observations file' not in value):
value['observations file'] = None
elif (value['observations file'] == 'None'):
value['observations file'] = None
if ('stokes weights' not in value):
value['stokes weights'] = None
elif (value['stokes weights'] == 'None'):
value['stokes weights'] = None
if ('mask file' not in value):
value['mask file'] = None
elif (value['mask file'] == 'None'):
value['mask file'] = None
if ('los' not in value):
value['los'] = None
elif (value['los'] == 'None'):
value['los'] = None
for tmp in ['i', 'q', 'u', 'v']:
if ('weights stokes {0}'.format(tmp) not in value):
value['weights stokes {0}'.format(tmp)] = [None]*10
elif (value['weights stokes {0}'.format(tmp)] == 'None'):
value['weights stokes {0}'.format(tmp)] = [None]*10
if ('boundary condition' not in value):
value['boundary condition'] = None
elif (value['boundary condition'] == 'None'):
value['boundary condition'] = None
if ('instrumental profile' not in value):
value['instrumental profile'] = None
elif (value['instrumental profile'] == 'None'):
value['instrumental profile'] = None
# Wavelength file is not present
if (value['wavelength file'] is None):
# If the wavelength is defined
if ('wavelength' in value):
axis = value['wavelength']
wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
wvl_lr = None
if (self.verbose >= 1):
self.logger.info(' - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
else:
raise Exception('Wavelength range is not defined. Please, use "Wavelength" or "Wavelength file"')
else:
# If both observed and synthetic wavelength points are given
if ('wavelength' in value):
axis = value['wavelength']
if (len(axis) != 3):
raise Exception("Wavelength range is not given in the format: lower, upper, steps")
wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
if (self.verbose >= 1):
self.logger.info(' - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
self.logger.info(' - Reading wavelength axis from {0}'.format(value['wavelength file']))
wvl_lr = np.loadtxt(self.root + value['wavelength file'])
else:
if (self.verbose >= 1):
self.logger.info(' - Reading wavelength axis from {0}'.format(value['wavelength file']))
wvl = np.loadtxt(self.root + value['wavelength file'])
wvl_lr = None
if (value['wavelength weight file'] is None):
if (self.verbose >= 1 and self.working_mode == 'inversion'):
self.logger.info(' - Setting all wavelength weights to 1')
weights = np.ones((4,len(wvl)))
else:
if (self.verbose >= 1):
self.logger.info(' - Reading wavelength weights from {0}'.format(value['wavelength weight file']))
weights = np.loadtxt(self.root + value['wavelength weight file'], skiprows=1).T
# Observations file not present
if (value['observations file'] is None):
if (self.working_mode == 'inversion'):
raise Exception("Inversion mode without observations is not allowed.")
obs_file = None
else:
if (self.verbose >= 1):
self.logger.info(' - Using observations from {0}'.format(value['observations file']))
obs_file = value['observations file']
if (value['mask file'] is None):
mask_file = None
if (self.verbose >= 1):
self.logger.info(' - No mask for pixels')
else:
if (self.verbose >= 1):
self.logger.info(' - Using mask from {0}'.format(value['mask file']))
mask_file = value['mask file']
if (value['instrumental profile'] is None):
if (self.verbose >= 1):
self.logger.info(' - No instrumental profile')
else:
if (self.verbose >= 1):
self.logger.info(' - Instrumental profile : {0}'.format(value['instrumental profile']))
# if (value['straylight file'] is None):
# if (self.verbose >= 1):
# self.logger.info(' - Not using straylight')
# stray_file = None
# else:
# if (self.verbose >= 1):
# self.logger.info(' - Using straylight from {0}'.format(value['straylight file']))
# stray_file = value['straylight file']
if (value['los'] is None):
if (self.working_mode == 'synthesis'):
raise Exception("You need to provide the LOS for spectral region {0}".format(value['name']))
los = None
else:
los = np.array(value['los']).astype('float64')
if (self.verbose >= 1):
self.logger.info(' - Using LOS {0}'.format(value['los']))
if (value['boundary condition'] is None):
if (self.verbose >= 1):
self.logger.info(' - Using default boundary conditions [1,0,0,0] in spectral region {0} or read from file. Check carefully!'.format(value['name']))
boundary = np.array([1.0,0.0,0.0,0.0])
self.normalization = 'on-disk'
else:
boundary = np.array(value['boundary condition']).astype('float64')
if (boundary[0] == 0.0):
if (self.verbose >= 1):
self.logger.info(' - Using off-limb normalization (peak intensity)')
if (self.verbose >= 1):
self.logger.info(' - Using boundary condition {0}'.format(value['boundary condition']))
stokes_weights = []
for st in ['i', 'q', 'u', 'v']:
tmp = hazel.util.tofloat(value['weights stokes {0}'.format(st)])
tmp = [i if i is not None else 1.0 for i in tmp]
stokes_weights.append(tmp)
stokes_weights = np.array(stokes_weights)
self.spectrum[value['name']] = Spectrum(wvl=wvl, weights=weights, observed_file=obs_file,
name=value['name'], stokes_weights=stokes_weights, los=los, boundary=boundary, mask_file=mask_file, instrumental_profile=value['instrumental profile'], root=self.root, wvl_lr=wvl_lr)
self.topologies.append(value['topology'])
def add_photosphere(self, atmosphere):
"""
Programmatically add a photosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = SIR_atmosphere(working_mode=self.working_mode, name=atm['name'], verbose=self.verbose)
lines = [int(k) for k in list(atm['spectral lines'])]
# If NLTE is available because PyTorch and PyTorch Geom are available
# check whether the line is needed in NLTE or not
if self.nlte_available:
if ('nlte' not in atm):
self.atmospheres[atm['name']].nlte = False
else:
self.atmospheres[atm['name']].nlte = hazel.util.tobool(atm['nlte'])
if (self.verbose >= 1):
self.logger.info(" * Line in NLTE if available")
else:
self.atmospheres[atm['name']].nlte = False
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
if ('reference frame' in atm):
if ('line-of-sight' in atm['reference frame']):
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if ('vertical' in atm['reference frame']):
raise Exception('Magnetic fields in photospheres are always in the line-of-sight reference frame.')
else:
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if (self.verbose >= 1):
self.logger.info(" * Adding line : {0}".format(lines))
self.logger.info(" * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))
self.atmospheres[atm['name']].add_active_line(lines=lines, spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range), verbose=self.verbose)
if (self.atmospheres[atm['name']].graphnet_nlte is not None):
self.set_nlte(True)
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
if ('temperature change to recompute departure coefficients' in atm):
self.atmospheres[atm['name']].t_change_departure = float(atm['temperature change to recompute departure coefficients'])
else:
self.atmospheres[atm['name']].t_change_departure = 0.0
def add_chromosphere(self, atmosphere):
"""
Programmatically add a chromosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Hazel_atmosphere(working_mode=self.working_mode, name=atm['name'])
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(line=atm['line'], spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('reference frame' in atm):
if (atm['reference frame'] == 'line-of-sight'):
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if (atm['reference frame'] == 'vertical'):
self.atmospheres[atm['name']].reference_frame = 'vertical'
else:
self.atmospheres[atm['name']].reference_frame = 'vertical'
if (self.verbose >= 1):
self.logger.info(" * Adding line : {0}".format(atm['line']))
self.logger.info(" * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
if ('coordinates for magnetic field vector' in atm):
if (atm['coordinates for magnetic field vector'] == 'cartesian'):
self.atmospheres[atm['name']].coordinates_B = 'cartesian'
if (atm['coordinates for magnetic field vector'] == 'spherical'):
self.atmospheres[atm['name']].coordinates_B = 'spherical'
else:
self.atmospheres[atm['name']].coordinates_B = 'cartesian'
self.atmospheres[atm['name']].select_coordinate_system()
if (self.verbose >= 1):
self.logger.info(" * Magnetic field coordinates system : {0}".format(self.atmospheres[atm['name']].coordinates_B))
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
self.atmospheres[atm['name']].height = float(atm['height'])
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
def add_parametric(self, atmosphere):
"""
Programmatically add a parametric atmosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Wavelength', 'Reference atmospheric model', 'Type',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Parametric_atmosphere(working_mode=self.working_mode)
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
def add_straylight(self, atmosphere):
"""
Programmatically add a straylight atmosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Straylight_atmosphere(working_mode=self.working_mode)
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
        if ('reference atmospheric model' in atm):
            my_file = Path(self.root + atm['reference atmospheric model'])
            if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
            self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
def remove_unused_atmosphere(self):
"""
Remove unused atmospheres
Parameters
----------
None
Returns
-------
None
"""
to_remove = []
for k, v in self.atmospheres.items():
if (not v.active):
to_remove.append(k)
if (self.verbose >= 1):
self.logger.info(' - Atmosphere {0} deleted.'.format(k))
for k in to_remove:
self.atmospheres.pop(k)
def init_sir_external(self):
"""
Initialize SIR for this synthesis
Parameters
----------
None
Returns
-------
None
"""
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
f = open('lte.grid', 'w')
f.write("IMPORTANT: a) All items must be separated by commas. \n")
f.write(" b) The first six characters of the last line \n")
f.write(" in the header (if any) must contain the symbol --- \n")
f.write("\n")
f.write("Line and blends indices : Initial lambda Step Final lambda \n")
f.write("(in this order) (mA) (mA) (mA) \n")
f.write("-----------------------------------------------------------------------\n")
ind_low = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[0])).argmin()
ind_top = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[1])).argmin()
low = v.spectrum.wavelength_axis[ind_low]
top = v.spectrum.wavelength_axis[ind_top] # TODO
delta = (v.spectrum.wavelength_axis[1] - v.spectrum.wavelength_axis[0])
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
flines = ff.readlines()
ff.close()
for i in range(len(v.lines)):
for l in flines:
tmp = l.split()
index = int(tmp[0].split('=')[0])
if (index == v.lines[0]):
wvl = float(tmp[2])
f.write("{0} : {1}, {2}, {3}\n".format(str(v.lines)[1:-1], 1e3*(low-wvl), 1e3*delta, 1e3*(top-wvl)))
f.close()
v.n_lambda = sir_code.init_externalfile(v.index, filename)
def init_sir(self):
"""
Initialize SIR for this synthesis. This version does not make use of any external file, which might be
not safe when running in MPI mode.
Parameters
----------
None
Returns
-------
None
"""
lines = []
n_lines = 0
elements = {'H':1,'HE':2,'LI':3,'BE':4,'B':5,'C':6,'N':7,'O':8,'F':9,'NE':10,
'NA':11,'MG':12,'AL':13,'SI':14,'P':15,'S':16,'CL':17,'AR':18,'K':19,'CA':20,'SC':21,'TI':22,'V':23,'CR':24,
'MN':25,'FE':26,'CO':27,'NI':28,'CU':29,'ZN':30,'GA':31,'GE':32,'AS':33,'SE':34,'BR':35,'KR':36,
'RB':37,'SR':38,'Y':39,'ZR':40,'NB':41,'MO':42,'TC':43,'RU':44,'RH':45,'PD':46,'AG':47,'CD':48,'IN':49,
'SN':50,'SB':51,'TE':52,'I':53,'XE':54,'CS':55,'BA':56,'LA':57,'CE':58,'PR':59,'ND':60,'PM':61,
'SM':62,'EU':63,'GD':64,'TB':65,'DY':66,'HO':67,'ER':68,'TM':69,'YB':70,'LU':71,'HF':72,'TA':73,'W':74,
'RE':75,'OS':76,'IR':77,'PT':78,'AU':79,'HG':80,'TL':81,'PB':82,'BI':83,'PO':84,'AT':85,'RN':86,
'FR':87,'RA':88,'AC':89,'TH':90,'PA':91,'U':92}
states = {'S': 0, 'P': 1, 'D': 2, 'F': 3, 'G': 4, 'H': 5, 'I': 6}
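        # Each LINEAS entry is parsed field by field: the element tag before '='
        # is mapped to its atomic number via `elements`, and a term written as
        # e.g. '5P' yields multiplicity 5 with orbital state 'P' (mapped to 1
        # via `states`). This is an illustration of the scheme, not a verbatim
        # line from the file.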
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
n_lines += 1
ind_low = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[0])).argmin()
ind_top = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[1])).argmin()
low = v.spectrum.wavelength_axis[ind_low]
top = v.spectrum.wavelength_axis[ind_top] # TODO
delta = (v.spectrum.wavelength_axis[1] - v.spectrum.wavelength_axis[0])
nblend = len(v.lines)
lines = np.zeros(len(v.lines), dtype=np.intc)
atom = np.zeros(len(v.lines), dtype=np.intc)
istage = np.zeros(len(v.lines), dtype=np.intc)
wvl = np.zeros(len(v.lines))
zeff = np.zeros(len(v.lines))
energy = np.zeros(len(v.lines))
loggf = np.zeros(len(v.lines))
mult1 = np.zeros(len(v.lines), dtype=np.intc)
mult2 = np.zeros(len(v.lines), dtype=np.intc)
design1 = np.zeros(len(v.lines), dtype=np.intc)
design2 = np.zeros(len(v.lines), dtype=np.intc)
tam1 = np.zeros(len(v.lines))
tam2 = np.zeros(len(v.lines))
alfa = np.zeros(len(v.lines))
sigma = np.zeros(len(v.lines))
for i in range(len(v.lines)):
lines[i] = v.lines[i]
for l in self.LINES:
tmp = l.split()
index = int(tmp[0].split('=')[0])
if (index == v.lines[i]):
atom[i] = elements[tmp[0].split('=')[1]]
istage[i] = tmp[1]
wvl[i] = float(tmp[2])
zeff[i] = float(tmp[3])
energy[i] = float(tmp[4])
loggf[i] = float(tmp[5])
mult1[i] = int(tmp[6][:-1])
mult2[i] = int(tmp[8][:-1])
design1[i] = states[tmp[6][-1]]
design2[i] = states[tmp[8][-1]]
tam1[i] = float(tmp[7].split('-')[0])
tam2[i] = float(tmp[9].split('-')[0])
if (len(tmp) == 12):
alfa[i] = float(tmp[-2])
sigma[i] = float(tmp[-1])
else:
alfa[i] = 0.0
sigma[i] = 0.0
lambda0 = 1e3*(low-wvl[0])
lambda1 = 1e3*(top-wvl[0])
n_steps = ind_top - ind_low + 1
v.n_lambda = n_steps
sir_code.init(v.index, nblend, lines, atom, istage, wvl, zeff, energy, loggf,
mult1, mult2, design1, design2, tam1, tam2, alfa, sigma, lambda0, lambda1, n_steps)
def exit_hazel(self):
for k, v in self.atmospheres.items():
if (v.type == 'chromosphere'):
hazel_code.exit(v.index)
def add_topology(self, atmosphere_order):
"""
Add a new topology
Parameters
----------
        atmosphere_order : str
            Topology string, e.g. 'ph1 -> ch1 + ch2'
Returns
-------
None
"""
# Transform the order to a list of lists
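        # e.g. 'ph1 -> ch1 + ch2' becomes [['ph1'], ['ch1', 'ch2']]: '->'
        # separates vertically stacked components, while '+' groups components
        # that share a layer and are combined through their filling factors.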
if (self.verbose >= 1):
self.logger.info(' - {0}'.format(atmosphere_order))
vertical_order = atmosphere_order.split('->')
order = []
for k in vertical_order:
name = k.strip().replace('(','').replace(')','').split('+')
name = [k.strip() for k in name]
tmp = []
for n in name:
tmp.append(n)
self.atmospheres[n].active = True
order.append(tmp)
order_flat = [item for sublist in order for item in sublist]
# Check that straylight components, if any, are not at the last position
for atm in order_flat[:-1]:
if (self.atmospheres[atm].type == 'straylight'):
raise Exception("Straylight components can only be at the last position of a topology.")
self.order_atmospheres.append(order)
# Check that there are no two photospheres linked with ->
# because they do not make any sense
n_photospheres_linked = []
for atmospheres in self.order_atmospheres:
for order in atmospheres:
for k, atm in enumerate(order):
if (self.atmospheres[atm].type == 'photosphere'):
n_photospheres_linked.append(k)
if (len(n_photospheres_linked) != len(set(n_photospheres_linked))):
raise Exception("There are several photospheres linked with ->. This is not allowed.")
def normalize_ff(self):
"""
Normalize all filling factors so that they add to one to avoid later problems.
We use a softmax function to make sure they all add to one and can be unconstrained
ff_i = exp(x_i) / sum(exp(x_i))
Parameters
----------
None
Returns
-------
None
"""
for atmospheres in self.order_atmospheres:
for order in atmospheres:
total_ff = 0.0
for atm in order:
if (self.atmospheres[atm].type != 'straylight'):
if (self.working_mode == 'inversion'):
                            ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], self.atmospheres[atm].ranges['ff'][0], self.atmospheres[atm].ranges['ff'][1])
else:
ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], -0.00001, 1.00001)
total_ff += ff
for atm in order:
if (self.atmospheres[atm].type != 'straylight'):
if (self.working_mode == 'inversion'):
ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], self.atmospheres[atm].ranges['ff'][0], self.atmospheres[atm].ranges['ff'][1])
self.atmospheres[atm].parameters['ff'] = ff / total_ff
self.atmospheres[atm].parameters['ff'] = physical_to_transformed(self.atmospheres[atm].parameters['ff'], self.atmospheres[atm].ranges['ff'][0], self.atmospheres[atm].ranges['ff'][1])
else:
ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], -0.00001, 1.00001)
self.atmospheres[atm].parameters['ff'] = ff / total_ff
self.atmospheres[atm].parameters['ff'] = physical_to_transformed(self.atmospheres[atm].parameters['ff'], -0.00001, 1.00001)
def synthesize_spectral_region(self, spectral_region, perturbation=False):
"""
Synthesize all atmospheres for a single spectral region and normalize to the continuum of the quiet Sun at disk center
Parameters
----------
spectral_region : str
Spectral region to synthesize
perturbation : bool
Set to True if you are synthesizing with a perturbation. In this case, the synthesis
is saved in spectrum.stokes_perturbed instead of spectrum.stokes
Returns
-------
None
"""
stokes = None
stokes_out = None
# Loop over all atmospheres
for i, atmospheres in enumerate(self.order_atmospheres):
for n, order in enumerate(atmospheres):
for k, atm in enumerate(order):
if (self.atmospheres[atm].spectrum.name == spectral_region):
# Update the boundary condition only for the first atmosphere if several are sharing ff
if (n > 0 and k == 0):
ind_low, ind_top = self.atmospheres[atm].wvl_range
if (perturbation):
stokes_out = self.atmospheres[atm].spectrum.stokes_perturbed[:, ind_low:ind_top] * hazel.util.i0_allen(self.atmospheres[atm].spectrum.wavelength_axis[ind_low:ind_top], 1.0)[None,:]
else:
stokes_out = self.atmospheres[atm].spectrum.stokes[:, ind_low:ind_top] * hazel.util.i0_allen(self.atmospheres[atm].spectrum.wavelength_axis[ind_low:ind_top], 1.0)[None,:]
if (self.atmospheres[atm].type == 'straylight'):
stokes, error = self.atmospheres[atm].synthesize(nlte=self.use_nlte)
if (error == 1):
raise
stokes += (1.0 - self.atmospheres[atm].parameters['ff']) * stokes_out
else:
if (k == 0):
if (self.use_analytical_RF):
stokes, self.rf_analytical, error = self.atmospheres[atm].synthesize(stokes_out, returnRF=True, nlte=self.use_nlte)
else:
stokes, error = self.atmospheres[atm].synthesize(stokes_out, nlte=self.use_nlte)
else:
tmp, error = self.atmospheres[atm].synthesize(stokes_out, nlte=self.use_nlte)
stokes += tmp
ind_low, ind_top = self.atmospheres[atm].wvl_range
mean_wvl = np.mean(self.atmospheres[atm].spectrum.wavelength_axis[ind_low:ind_top])
i0 = hazel.util.i0_allen(mean_wvl, 1.0)
# Divide by i0
if (self.use_analytical_RF):
for k, v in self.rf_analytical.items():
if (k != 'ff'):
v /= i0
if (perturbation):
self.atmospheres[atm].spectrum.stokes_perturbed[:, ind_low:ind_top] = stokes / i0#[None,:]
else:
self.atmospheres[atm].spectrum.stokes[:, ind_low:ind_top] = stokes / i0#[None,:]
def set_nlte(self, option):
"""
Set calculation of Ca II 8542 A to NLTE
Parameters
----------
option : bool
Set to True to use NLTE, False to use LTE
"""
self.use_nlte = option
if (self.verbose >= 1):
self.logger.info('Setting NLTE for Ca II 8542 A to {0}'.format(self.use_nlte))
def synthesize(self, perturbation=False):
"""
Synthesize all atmospheres
Parameters
----------
perturbation : bool
Set to True if you are synthesizing with a perturbation. In this case, the synthesis
is saved in spectrum.stokes_perturbed instead of spectrum.stokes
Returns
-------
None
"""
if (self.working_mode == 'inversion'):
self.normalize_ff()
for k, v in self.spectrum.items():
self.synthesize_spectral_region(k, perturbation=perturbation)
if (v.normalization == 'off-limb'):
if (perturbation):
v.stokes_perturbed /= np.max(v.stokes_perturbed[0,:])
else:
v.stokes /= np.max(v.stokes[0,:])
if (v.psf_spectral is not None):
for i in range(4):
if (perturbation):
v.stokes_perturbed[i,:] = scipy.signal.convolve(v.stokes_perturbed[i,:], v.psf_spectral, mode='same', method='auto')
else:
v.stokes[i,:] = scipy.signal.convolve(v.stokes[i,:], v.psf_spectral, mode='same', method='auto')
if (v.interpolate_to_lr):
for i in range(4):
if (perturbation):
v.stokes_perturbed_lr[i,:] = np.interp(v.wavelength_axis_lr, v.wavelength_axis, v.stokes_perturbed[i,:])
else:
v.stokes_lr[i,:] = np.interp(v.wavelength_axis_lr, v.wavelength_axis, v.stokes[i,:])
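
    # Typical synthesis driver sketch (assumes a valid configuration file;
    # the path is illustrative):
    #   mod = Model('conf.ini', working_mode='synthesis')
    #   mod.open_output()
    #   mod.synthesize()
    #   mod.write_output()
    #   mod.close_output()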
def find_active_parameters(self, cycle):
"""
Find all active parameters in all active atmospheres in the current cycle
Parameters
----------
cycle : int
Cycle to consider
Returns
-------
None
"""
pars = []
coupled = []
self.nodes = []
left = 0
right = 0
for atmospheres in self.order_atmospheres:
for n, order in enumerate(atmospheres):
for k, atm in enumerate(order):
for l, par in self.atmospheres[atm].cycles.items():
if (par is not None):
if (hazel.util.isint(par[cycle])):
if (par[cycle] > 0):
# [Atmosphere name, n_nodes, nodes, value, range]
self.atmospheres[atm].nodes[l] = np.zeros(par[cycle])
self.atmospheres[atm].n_nodes[l] = par[cycle]
right += par[cycle]
n_lambda = len(self.atmospheres[atm].spectrum.wavelength_axis)
tmp = {'atm': atm, 'n_nodes': par[cycle], 'parameter': l,
'ranges': self.atmospheres[atm].ranges[l], 'delta': self.atmospheres[atm].epsilon[l],
'left': left, 'right': right, 'regularization': self.atmospheres[atm].regularization[l],
'coupled': False}
self.nodes.append(self.atmospheres[atm].nodes[l])
left = copy.copy(right)
pars.append(tmp)
else:
self.atmospheres[atm].nodes[l] = 0.0
self.atmospheres[atm].n_nodes[l] = 0
else:
n_lambda = len(self.atmospheres[atm].spectrum.wavelength_axis)
tmp = {'atm': atm, 'n_nodes': par[cycle], 'parameter': l, 'coupled': True}
coupled.append(tmp)
self.active_meta = pars
self.coupled_meta = coupled
if (not self.nodes):
raise Exception("No parameters to invert in cycle {0}. Please add them or reduce the number of cycles. ".format(cycle))
        self.nodes = np.concatenate(self.nodes)
"""
=======================================
The :mod:`mpi_array.locale_test` Module
=======================================
Module defining :mod:`mpi_array.locale` unit-tests.
Execute as::
python -m mpi_array.locale_test
Classes
=======
.. autosummary::
:toctree: generated/
:template: autosummary/inherits_TestCase_class.rst
WinLndarrayTest - Tests for :obj:`mpi_array.locale.win_lndarray`.
LndarrayTest - Tests for :obj:`mpi_array.locale.lndarray`.
LndarrayProxyTest - Tests for :obj:`mpi_array.locale.LndarrayProxy`.
"""
from __future__ import absolute_import
from array_split.split import shape_factors as _shape_factors
import mpi4py.MPI as _mpi
import numpy as _np # noqa: E402,F401
from . import locale as _locale
from .license import license as _license, copyright as _copyright, version as _version
from . import unittest as _unittest
from . import logging as _logging # noqa: E402,F401
from .comms import create_distribution, LT_PROCESS, LT_NODE
from .distribution import IndexingExtent, LocaleExtent
from .distribution import GlobaleExtent
__author__ = "<NAME>"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class WinLndarrayTest(_unittest.TestCase):
"""
Tests for :obj:`mpi_array.locale.win_lndarray`.
"""
def test_construct_with_invalid_comm(self):
"""
Tests that ValueError is raised for invalid communicator argument
passed to :obj:`mpi_array.locale.win_lndarray` constructor.
"""
comm = None
self.assertRaises(ValueError, _locale.win_lndarray, shape=(100,), comm=comm)
comm = _mpi.COMM_NULL
self.assertRaises(ValueError, _locale.win_lndarray, shape=(100,), comm=comm)
def test_construct(self):
"""
Tests for :obj:`mpi_array.locale.win_lndarray` construction.
"""
comm = _mpi.COMM_SELF
ary = _locale.win_lndarray(shape=(10, 10, 10), dtype="int32", comm=comm)
self.assertTrue(ary.comm is comm)
self.assertTrue(ary.win is not None)
comm = _mpi.COMM_WORLD
if (_mpi.VERSION >= 3) and (comm.size > 1):
comm = comm.Split_type(_mpi.COMM_TYPE_SHARED, key=comm.rank)
if comm.size > 1:
ary = _locale.win_lndarray(shape=(comm.size, 10, 10), dtype="int32", comm=comm)
self.assertTrue(ary.comm is comm)
self.assertTrue(ary.win is not None)
ary[comm.rank] = comm.rank + 1
comm.barrier()
for r in range(0, comm.size):
self.assertTrue(_np.all(ary[r] == (r + 1)))
class LndarrayTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :obj:`mpi_array.locale.lndarray`.
"""
def test_construct(self):
"""
Tests :meth:`mpi_array.locale.lndarray.__new__`.
"""
gshape = (11, 13, 51)
with _locale.ones(shape=gshape, dtype="int16") as lary:
slary = lary.lndarray
# MPI windows own buffer data
self.assertTrue(slary.flags.carray)
self.assertFalse(slary.flags.owndata)
self.assertFalse(slary.base.flags.owndata)
self.assertTrue(slary.base.flags.carray)
lshape = slary.shape
bad_lshape = list(lshape)
bad_lshape[-1] += 1
self.assertRaises(
ValueError,
_locale.lndarray,
shape=lshape
)
def test_view(self):
"""
Tests :meth:`mpi_array.locale.lndarray.__getitem__`.
"""
gshape = (11, 13, 51)
with _locale.ones(shape=gshape, dtype="int16") as lary:
slary = lary.lndarray
# MPI windows own buffer data
self.assertTrue(slary.flags.carray)
self.assertFalse(slary.flags.owndata)
self.assertFalse(slary.base.flags.owndata)
self.assertTrue(slary.base.flags.carray)
v = lary[0:slary.shape[0] // 2, 0:slary.shape[1] // 2, 0:slary.shape[2] // 2]
self.assertTrue(isinstance(v, _locale.lndarray))
self.assertFalse(v.flags.owndata)
self.assertFalse(
((v.size > 0) and (v.shape[0] > 1) and (v.shape[1] > 1))
and
v.flags.carray,
"v.size=%s, v.shape=%s, v.flags.carray=%s" % (v.size, v.shape, v.flags.carray)
)
self.assertTrue(isinstance(v.base, _locale.lndarray))
self.assertFalse(v.base.flags.owndata)
self.assertTrue(v.base.flags.carray)
self.assertFalse(isinstance(v.base.base, _locale.lndarray))
self.assertTrue(isinstance(v.base.base, _np.ndarray))
self.assertFalse(v.base.base.flags.owndata)
self.assertTrue(v.base.base.flags.carray)
def test_numpy_sum(self):
"""
Test :func:`numpy.sum` reduction using a :obj:`mpi_array.locale.lndarray`
as argument.
"""
gshape = (50, 50, 50)
comms_and_distrib = create_distribution(gshape)
with \
_locale.ones(shape=gshape, comms_and_distrib=comms_and_distrib, dtype="int32") \
as lary:
slary = lary.lndarray
l_sum = _np.sum(lary.rank_view_n)
self.assertFalse(l_sum.flags.owndata)
self.assertTrue(l_sum.base.flags.owndata)
rank_logger = _logging.get_rank_logger(__name__ + self.id())
rank_logger.info("type(slary)=%s", type(slary))
rank_logger.info("type(slary.base)=%s", type(slary.base))
rank_logger.info("type(l_sum)=%s", type(l_sum))
rank_logger.info("type(l_sum.base)=%s", type(l_sum.base))
g_sum = comms_and_distrib.locale_comms.peer_comm.allreduce(l_sum, op=_mpi.SUM)
self.assertEqual(_np.product(gshape), g_sum)
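            # With ones over the (50, 50, 50) grid, the globale sum recovered
            # by allreduce(SUM) is 50**3 = 125000, i.e. _np.product(gshape).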
class LndarrayProxyTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :obj:`mpi_array.locale.LndarrayProxy`.
"""
def test_construct_arg_checking(self):
"""
Test for :meth:`mpi_array.locale.LndarrayProxy.__new__`.
"""
self.assertRaises(
ValueError,
_locale.LndarrayProxy,
shape=None,
locale_extent=None,
dtype="int64"
)
gshape = (10, 11, 12, 13, 5)
        bad_lshape = (gshape[0] - 1,) + gshape[1:]
self.assertRaises(
ValueError,
_locale.LndarrayProxy,
shape=bad_lshape,
locale_extent=LocaleExtent(
peer_rank=_mpi.COMM_WORLD.rank,
inter_locale_rank=_mpi.COMM_WORLD.rank,
globale_extent=GlobaleExtent(stop=gshape),
start=(0, 0, 0, 0, 0),
stop=gshape
)
)
def test_fill(self):
"""
Test for :meth:`mpi_array.locale.LndarrayProxy.fill`.
"""
comms_and_distrib = create_distribution(shape=(24, 35, 14, 7))
with \
_locale.empty(
shape=(24, 35, 14, 7),
comms_and_distrib=comms_and_distrib,
dtype="int64"
) as lary:
rank_val = comms_and_distrib.locale_comms.peer_comm.rank + 1
lary.fill(rank_val)
self.assertTrue(_np.all(lary[lary.rank_view_slice_n] == rank_val))
def test_get_and_set_item(self):
"""
"""
comms_and_distrib = create_distribution(shape=(24, 35, 14, 7))
with \
_locale.empty(
shape=(24, 35, 14, 7),
comms_and_distrib=comms_and_distrib,
dtype="int64"
) as lary:
rank_val = comms_and_distrib.locale_comms.peer_comm.rank + 1
lary[lary.rank_view_slice_n] = rank_val
self.assertSequenceEqual(
list(IndexingExtent(lary.rank_view_slice_n).shape),
list(lary[lary.rank_view_slice_n].shape)
)
self.assertTrue(_np.all(lary[lary.rank_view_slice_n] == rank_val))
self.assertSequenceEqual(
list(IndexingExtent(lary.rank_view_slice_h).shape),
list(lary[lary.rank_view_slice_h].shape)
)
def test_empty_shared_1d(self):
"""
Test for :func:`_locale.empty` and :func:`_locale.empty_like`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape)
with _locale.empty(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
self.assertSequenceEqual(
list(lshape),
list(IndexingExtent(lary.intra_partition.rank_view_slice_n).shape)
)
with _locale.empty_like(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
self.assertSequenceEqual(
list(lshape),
list(IndexingExtent(lary1.intra_partition.rank_view_slice_n).shape)
)
ary = _locale.empty_like(_np.zeros(lshape, dtype="int64"))
self.assertEqual(_np.dtype("int64"), ary.dtype)
self.assertSequenceEqual(
list(lshape),
list(ary.shape)
)
def test_empty_non_shared_1d(self):
"""
Test for :func:`_locale.empty` and :func:`_locale.empty_like`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape, locale_type=LT_PROCESS)
with _locale.empty(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
self.assertSequenceEqual(list(lshape), list(lary.shape))
self.assertSequenceEqual(
list(lshape),
list(IndexingExtent(lary.intra_partition.rank_view_slice_n).shape)
)
with _locale.empty_like(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
self.assertSequenceEqual(list(lshape), list(lary1.shape))
self.assertSequenceEqual(
list(lshape),
list(IndexingExtent(lary1.intra_partition.rank_view_slice_n).shape)
)
def test_zeros_shared_1d(self):
"""
Test for :func:`_locale.zeros` and :func:`_locale.zeros_like`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape)
with _locale.zeros(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary == 0))
with _locale.zeros_like(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary1 == 0))
def test_zeros_non_shared_1d(self):
"""
Test for :func:`_locale.zeros` and :func:`_locale.zeros_like`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape, locale_type=LT_PROCESS)
with _locale.zeros(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary == 0))
with _locale.zeros_like(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary1 == 0))
def test_ones_shared_1d(self):
"""
Test for :func:`_locale.ones` and :func:`_locale.ones_like`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape)
with _locale.ones(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary == 1))
with _locale.ones_like(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary1 == 1))
def test_ones_non_shared_1d(self):
"""
Test for :func:`_locale.ones` and :func:`_locale.ones_like`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape, locale_type=LT_PROCESS)
with _locale.ones(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary == 1))
with _locale.ones_like(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary1 == 1))
def test_copy_shared_1d(self):
"""
Test for :func:`_locale.copy`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape)
with _locale.ones(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
lary.rank_view_n[...] = cand.locale_comms.peer_comm.rank
with _locale.copy(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary1 == lary))
def test_copy_non_shared_1d(self):
"""
Test for :func:`_locale.copy`.
"""
lshape = (10,)
gshape = (_mpi.COMM_WORLD.size * lshape[0],)
cand = create_distribution(shape=gshape, locale_type=LT_PROCESS)
with _locale.ones(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
lary.rank_view_n[...] = cand.locale_comms.peer_comm.rank
with _locale.copy(lary) as lary1:
self.assertEqual(_np.dtype("int64"), lary1.dtype)
cand.locale_comms.peer_comm.barrier()
self.assertTrue(_np.all(lary1 == lary))
def do_test_views_2d(self, halo=0):
"""
Test for :meth:`mpi_array.locale.LndarrayProxy.rank_view_n`
and :meth:`mpi_array.locale.LndarrayProxy.rank_view_h`.
"""
lshape = _np.array((4, 3), dtype="int64")
gshape = lshape * _shape_factors(_mpi.COMM_WORLD.size, lshape.size)[::-1]
cands = \
[
create_distribution(shape=gshape, locale_type=LT_NODE, halo=halo),
create_distribution(shape=gshape, locale_type=LT_PROCESS, halo=halo)
]
for cand in cands:
with _locale.ones(comms_and_distrib=cand, dtype="int64") as lary:
self.assertEqual(_np.dtype("int64"), lary.dtype)
rank_logger = _logging.get_rank_logger(self.id(), comm=cand.locale_comms.peer_comm)
rank_logger.info(
(
"\n========================================================\n" +
"locale_extent = %s\n" +
"rank_view_slice_n = %s\n" +
"rank_view_slice_h = %s\n" +
"rank_view_relative_slice_n = %s\n" +
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
)
%
(
lary.locale_extent,
lary.intra_partition.rank_view_slice_n,
lary.intra_partition.rank_view_slice_h,
lary.intra_partition.rank_view_relative_slice_n,
)
)
if cand.locale_comms.intra_locale_comm.rank == 0:
lary.view_h[...] = -1
cand.locale_comms.intra_locale_comm.barrier()
lary.rank_view_n[...] = cand.locale_comms.peer_comm.rank
cand.locale_comms.intra_locale_comm.barrier()
                if _np.any(lary.halo > 0):
# Copyright 2020 BULL SAS All rights reserved
"""This module provides the different tests for the resampling policies.
"""
import unittest
import numpy as np
from bbo.noise_reduction.resampling_policies import (
ResamplingPolicy,
SimpleResampling,
DynamicResampling,
DynamicResamplingParametric,
DynamicResamplingNonParametric,
DynamicResamplingNonParametric2,
)
def resampling_schedule(nbr_it):
return 1 / np.log(1 + nbr_it)
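# Illustrative check (added; not part of the original test module): the custom
# schedule above decays with the iteration count, e.g. it equals 1/log(2) ~ 1.44
# at nbr_it=1 and 1/log(11) ~ 0.42 at nbr_it=10.
assert abs(resampling_schedule(1) - 1 / np.log(2)) < 1e-12
assert resampling_schedule(10) < resampling_schedule(1)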
class TestResampling(unittest.TestCase):
"""Tests the proper implementation of the parent resampling
class.
"""
def test_parent_class(self):
"""Test the initialization of the Resampling parent
class.
"""
resampling_policy = ResamplingPolicy()
def test_not_implemented(self):
"""Test that a not implemented error is raised when calling the
resample method"""
resampling_policy = ResamplingPolicy()
with self.assertRaises(NotImplementedError):
resampling_policy.resample(history={})
class TestSimpleResampling(unittest.TestCase):
"""Tests the SimpleResampling class.
"""
def test_simple_resampling_resample(self):
"""Tests that the parameter are properly resampled when the
last parameter should be resampled.
"""
history = {
"fitness": np.array([10, 5, 4, 2, 15, 20]),
"parameters": np.array([[1, 2], [2, 3], [1, 3], [4, 3], [2, 1], [2, 1]]),
}
resampler = SimpleResampling(nbr_resamples=3)
self.assertTrue(resampler.resample(history))
def test_simple_resampling_no_resample(self):
"""Tests that the parameter are properly resampled when the
last parameter should not be resampled.
"""
history = {
"fitness": np.array([10, 5, 4, 2, 15, 20]),
"parameters": np.array([[1, 2], [2, 3], [1, 3], [2, 1], [2, 1], [2, 1]]),
}
resampler = SimpleResampling(nbr_resamples=3)
self.assertFalse(resampler.resample(history))
def test_simple_resampling_no_sequential(self):
"""Tests that when the samples are not sequentially ordered,
the resampling still happens properly.
"""
history = {
"fitness": np.array([10, 5, 4, 2, 15, 20]),
"parameters": np.array([[1, 2], [2, 3], [1, 3], [4, 3], [2, 1], [2, 1]]),
}
resampler = SimpleResampling(nbr_resamples=2)
self.assertFalse(resampler.resample(history))
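# Note (added): SimpleResampling.resample returns True while the most recently
# evaluated parameter vector appears fewer than `nbr_resamples` times in the
# history, which is exactly what the three cases above exercise.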
class TestDynamicResampling(unittest.TestCase):
"""Tests the abstract class of dynamic resampling.
"""
def test_initialization(self):
"""Tests the proper initialization of the class.
"""
dynamic_resampling = DynamicResampling(percentage=0.5)
def test_resampling_schedule_None(self):
"""Tests the good definition of the resampling schedule when set to None.
"""
dynamic_resampling = DynamicResampling(percentage=0.5)
self.assertEqual(0.5, dynamic_resampling.resampling_schedule(1))
def test_resampling_schedule(self):
"""Tests the good definition of the resampling schedule.
"""
dynamic_resampling = DynamicResampling(
percentage=0.5, resampling_schedule=resampling_schedule
)
self.assertEqual(0.5 / np.log(2), dynamic_resampling.resampling_schedule(1))
def test_allow_resampling_schedule_None(self):
"""Tests that the allow resampling schedule behaves as expected when set to None.
"""
dynamic_resampling = DynamicResampling(percentage=0.5)
self.assertEqual(1, dynamic_resampling.allow_resampling_schedule(1))
def test_allow_resampling_schedule(self):
"""Tests that the allow resampling schedule behaves as expected when set to a value.
"""
dynamic_resampling = DynamicResampling(
percentage=0.5,
allow_resampling_start=5,
allow_resampling_schedule=resampling_schedule,
)
self.assertEqual(5 / np.log(2), dynamic_resampling.allow_resampling_schedule(1))
class TestDynamicResamplingParametric(unittest.TestCase):
"""Tests that the DynamicResamplingParametric class behaves as expected.
"""
def test_error_percentage(self):
"""Tests that dynamic resampling raises an error when the percentage is below 0.
"""
with self.assertRaises(ValueError):
DynamicResamplingParametric(percentage=-0.1)
def test_dynamic_resampling_formula(self):
"""
Tests that the computation performs what is expected.
"""
test_dynamic_resampling = DynamicResamplingParametric(0.2)
history = {
"fitness": np.array([10, 5, 4, 14, 15, 16]),
"parameters": | np.array([[1, 2], [2, 3], [1, 3], [2, 1], [2, 1], [2, 1]]) | numpy.array |
import argparse
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pdb
import os
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.layers import Input, Dense, Conv2D, BatchNormalization, MaxPool1D, Flatten
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import regularizers
from tensorflow.keras import optimizers
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import load_model
def get_argparser():
parser = argparse.ArgumentParser()
### CNNs
parser.add_argument("--units",
default=16,
type=int,
help="Number of units in hidden layers of the DNN")
parser.add_argument("--layers",
default=2,
type=int,
help="Number of layers in the DNN")
parser.add_argument("--reg",
default=0.1,
type=float,
help="Regularization weight of the DNN")
parser.add_argument("--data_type",
default='rgb',
type=str,
help="Which data to evaluate on: (ir, rgb, cover)")
return parser
if __name__ == '__main__':
parser = get_argparser()
args = parser.parse_args()
data_type = args.data_type
########################
# Load and process data
########################
layers_arr = [1, 2, 3, 4]
units_arr = [4, 8, 16, 32, 64]
reg_arr = [0.001, 0.005, 0.01, 0.05, 0.1]
plot_obj_train = np.zeros((len(layers_arr), len(units_arr), len(reg_arr)))
plot_obj_val = np.zeros((len(layers_arr), len(units_arr), len(reg_arr)))
for layers_idx in range(len(layers_arr)):
for units_idx in range(len(units_arr)):
for reg_idx in range(len(reg_arr)):
layers = layers_arr[layers_idx]
units = units_arr[units_idx]
regularizer = reg_arr[reg_idx]
kernel = 5
model_name = f'CNN10-layers{layers}-units{units}-kernel{kernel}-reg{regularizer}-{data_type}'
loss_obj = np.loadtxt(f'output-data/train-history-{model_name}.csv', delimiter=',')
print(f'For model {model_name} Best train accuracy is {np.max(loss_obj[2])}')
print(f'For model {model_name} Best val accuracy is {np.max(loss_obj[3])}')
plot_obj_train[layers_idx, units_idx, reg_idx] = np.max(loss_obj[2])
plot_obj_val[layers_idx, units_idx, reg_idx] = np.max(loss_obj[3])
# Pick one regularization
for ri in range(len(reg_arr)):
reg_amt = reg_arr[ri]
cut_plot_obj = plot_obj_train[:, :, ri]
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
width = 0.15
n_col = len(layers_arr)
ax.set_title(f'Hyperparameter Search for {data_type.upper()} models', fontsize=24)
ax.set_ylim([0, 1.05])
ax.bar(np.arange(n_col) - 2 * width, cut_plot_obj[:, 0], width, label='4 filters')
ax.bar(np.arange(n_col) - 1 * width, cut_plot_obj[:, 1], width, label='8 filters')
ax.bar(np.arange(n_col) + 0 * width, cut_plot_obj[:, 2], width, label='16 filters')
ax.bar(np.arange(n_col) + 1 * width, cut_plot_obj[:, 3], width, label='32 filters')
ax.bar(np.arange(n_col) + 2 * width, cut_plot_obj[:, 4], width, label='64 filters')
ax.set_axisbelow(True)
ax.grid(True, which='major', axis='y', linestyle = '--')
ax.legend(loc='best', ncol=3, fontsize=20)
ax.set_xticks(np.arange(n_col))
ax.set_xticklabels(layers_arr, fontsize=16)
ax.set_xlabel('Layers', fontsize=24)
ax.set_ylabel('Training Accuracy', fontsize=24)
fig.tight_layout(rect=[0, 0, 1, 1])
plt.savefig(f'plot-hyperparam-{data_type}-train-{reg_amt}.pdf')
####################################################
cut_plot_obj = plot_obj_val[:, :, 4]
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
width = 0.15
n_col = len(layers_arr)
ax.set_title(f'Hyperparameter Search for {data_type.upper()} models', fontsize=24)
ax.set_ylim([0, 1.05])
    ax.bar(np.arange(n_col) - 2 * width, cut_plot_obj[:, 0], width, label='4 filters')
    ax.bar(np.arange(n_col) - 1 * width, cut_plot_obj[:, 1], width, label='8 filters')
    ax.bar(np.arange(n_col) + 0 * width, cut_plot_obj[:, 2], width, label='16 filters')
    ax.bar(np.arange(n_col) + 1 * width, cut_plot_obj[:, 3], width, label='32 filters')
    ax.bar(np.arange(n_col) + 2 * width, cut_plot_obj[:, 4], width, label='64 filters')
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from scipy.signal import lfilter
from scipy.signal.windows import get_window
def _pre_emphasize_data(data, emphasize_value=0.97):
filter_window = np.asarray([1, -emphasize_value])
pre_emphasized_data = lfilter(filter_window, 1, data)
return pre_emphasized_data
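# Note (added): lfilter([1, -a], 1, x) above realizes the one-tap pre-emphasis
# high-pass y[n] = x[n] - a*x[n-1] (a = 0.97 by default), which boosts high
# frequencies before spectral analysis.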
def get_length_in_samp(sampling_rate_in_hz, length_in_s):
return int(sampling_rate_in_hz * length_in_s)
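# Illustrative usage (added): a 25 ms analysis window at a 16 kHz sampling rate
# corresponds to int(16000 * 0.025) == 400 samples.
assert get_length_in_samp(16000, 0.025) == 400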
def get_group_delay(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type):
X_stft_transform = _get_stft(raw_data, sampling_rate_in_hz,
window_length_in_s, window_shift_in_s,
num_fft_points, window_type=window_type)
Y_stft_transform = _get_stft(raw_data, sampling_rate_in_hz,
window_length_in_s, window_shift_in_s,
num_fft_points, window_type=window_type,
data_transformation='group_delay')
X_stft_transform_real = np.real(X_stft_transform)
X_stft_transform_imag = np.imag(X_stft_transform)
Y_stft_transform_real = np.real(Y_stft_transform)
Y_stft_transform_imag = np.imag(Y_stft_transform)
nominator = np.multiply(X_stft_transform_real,
Y_stft_transform_real) + np.multiply(
X_stft_transform_imag, Y_stft_transform_imag)
denominator = np.square(np.abs(X_stft_transform))
group_delay = np.divide(nominator, denominator + 1e-10)
assert not np.isnan(
group_delay).any(), 'There are NaN values in group delay'
return np.transpose(group_delay)
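# Note (added): the ratio computed above is the standard group-delay identity
# tau(w) = Re(X(w) * conj(Y(w))) / |X(w)|^2, where Y is the STFT of the
# 'group_delay'-transformed (time-weighted) signal.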
def get_phase_stft_magnitude(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type):
stft = _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points,
window_type=window_type)
abs_stft = np.abs(stft)
phase = np.angle(stft)
stft_phase = np.concatenate((phase, abs_stft), axis=1)
return np.transpose(stft_phase)
def get_stft_magnitude(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type):
stft = _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points,
window_type=window_type)
stft_magnitude = np.abs(stft)
return np.transpose(stft_magnitude)
################################################################################
# The following code for FBank is adapted from jameslyons/python_speech_features
# MIT licensed implementation
# https://github.com/jameslyons/python_speech_features/blob/40c590269b57c64a8c1f1ddaaff2162008d1850c/python_speech_features/base.py#L84
################################################################################
def get_fbank(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type,
num_filter_bands):
stft = _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points,
window_type=window_type,
zero_mean_offset=True)
stft_power = np.abs(stft) ** 2
upper_limit_freq = int(sampling_rate_in_hz / 2)
upper_limit_mel = _convert_hz_to_mel(upper_limit_freq)
lower_limit_mel = 0
list_mel_points = np.linspace(lower_limit_mel, upper_limit_mel,
num_filter_bands + 2)
mel_fbank_matrix = _get_mel_fbank_matrix(list_mel_points, num_filter_bands,
num_fft_points,
sampling_rate_in_hz)
mel_fbank_feature = np.dot(stft_power, np.transpose(mel_fbank_matrix))
log_mel_fbank_feature = np.log(mel_fbank_feature + 1.0e-10)
return np.transpose(log_mel_fbank_feature)
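# Note (added): the FBank pipeline above is power spectrum -> triangular
# mel-filterbank projection -> log compression, following the
# python_speech_features implementation credited above.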
def _get_mel_fbank_matrix(list_mel_points, num_filter_bands, num_fft_points,
sampling_rate_in_hz):
num_ess_fft_points = get_non_symmetric_length(num_fft_points)
freq_scale = (num_fft_points + 1) / sampling_rate_in_hz
freq_bins_on_mel_scale = np.floor(
freq_scale * _convert_mel_to_hz(list_mel_points))
mel_scaled_fbank = np.zeros((num_filter_bands, num_ess_fft_points),
dtype=np.float32)
for filt_idx in range(num_filter_bands):
start_bin_freq = freq_bins_on_mel_scale[filt_idx]
middle_bin_freq = freq_bins_on_mel_scale[filt_idx + 1]
end_bin_freq = freq_bins_on_mel_scale[filt_idx + 2]
mel_scaled_fbank[filt_idx] = _create_triangular_filter(start_bin_freq,
middle_bin_freq,
end_bin_freq,
num_ess_fft_points)
return mel_scaled_fbank
def _create_triangular_filter(start_bin_freq, middle_bin_freq, end_bin_freq,
num_ess_fft_points):
filter_window = np.zeros(num_ess_fft_points, dtype=np.float32)
filt_support_begin = middle_bin_freq - start_bin_freq
filt_support_end = end_bin_freq - middle_bin_freq
for freq in range(int(start_bin_freq), int(middle_bin_freq)):
filter_window[freq] = (freq - start_bin_freq) / filt_support_begin
for freq in range(int(middle_bin_freq), int(end_bin_freq)):
filter_window[freq] = (end_bin_freq - freq) / filt_support_end
return filter_window
def _convert_hz_to_mel(hz):
return 2595.0 * np.log10(1 + hz / 700.0)
def _convert_mel_to_hz(mel):
return 700.0 * (10 ** (mel / 2595.0) - 1)
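# Illustrative sanity check (added): the two conversions above are exact
# inverses, e.g. 1000 Hz maps to roughly 1000 mel and back.
assert abs(_convert_mel_to_hz(_convert_hz_to_mel(1000.0)) - 1000.0) < 1e-6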
def _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type,
data_transformation=None, zero_mean_offset=False):
pre_emphasized_data = _pre_emphasize_data(raw_data)
stft = _short_time_fourier_transform(pre_emphasized_data,
sampling_rate_in_hz,
window_length_in_s, window_shift_in_s,
num_fft_points, window_type,
data_transformation, zero_mean_offset)
non_symmetric_stft = get_non_symmetric_data(stft)
return non_symmetric_stft
def _short_time_fourier_transform(data, sampling_rate_in_hz,
window_length_in_s,
window_shift_in_s, num_fft_points,
window_type, data_transformation=None,
zero_mean_offset=False):
    window_length_in_samp = get_length_in_samp(sampling_rate_in_hz,
                                               window_length_in_s)
    window_shift_in_samp = get_length_in_samp(sampling_rate_in_hz,
                                              window_shift_in_s)
preprocessed_data_matrix = _preprocess_to_padded_matrix(data,
window_length_in_samp,
window_shift_in_samp,
zero_mean_offset=zero_mean_offset)
weighted_data_matrix = _weight_data_matrix(
preprocessed_data_matrix,
window_type,
data_transformation=data_transformation
)
fft = np.fft.fft(weighted_data_matrix, n=num_fft_points)
return fft
def _preprocess_to_padded_matrix(data, window_length_in_samp,
window_shift_in_samp, zero_mean_offset=False):
num_input = data.shape[0]
num_output = get_num_output_padded_to_fit_input(num_input,
window_length_in_samp,
window_shift_in_samp)
zero_padded_matrix = np.zeros((num_output, window_length_in_samp),
                                  dtype=np.float64)
for num_output_idx in range(num_output):
start_idx = window_shift_in_samp * num_output_idx
is_last_output = num_output_idx == num_output - 1
end_idx = start_idx + window_length_in_samp if not is_last_output else num_input
end_padded_idx = window_length_in_samp if not is_last_output else end_idx - start_idx
window_data = data[start_idx:end_idx]
if zero_mean_offset:
            window_data = window_data - np.mean(window_data)
        zero_padded_matrix[num_output_idx, :end_padded_idx] = window_data
    return zero_padded_matrix
import numpy as np
from nsgt.fft import rfftp, irfftp, fftp, ifftp
import unittest
class TestFFT(unittest.TestCase):
def __init__(self, methodName, n=10000):
super(TestFFT, self).__init__(methodName)
self.n = n
def test_rfft(self):
seq = np.random.random(self.n)
ft = rfftp()
a = ft(seq)
b = np.fft.rfft(seq)
self.assertTrue(np.allclose(a, b))
def test_irfft(self):
seq = np.random.random(self.n)+np.random.random(self.n)*1.j
outn = (self.n-1)*2 + np.random.randint(0,2) # even or odd output size
ft = irfftp()
a = ft(seq, outn=outn)
b = np.fft.irfft(seq, n=outn)
self.assertTrue(np.allclose(a, b))
def test_fft(self):
seq = np.random.random(self.n)
ft = fftp()
a = ft(seq)
b = np.fft.fft(seq)
self.assertTrue(np.allclose(a, b))
def test_ifft(self):
seq = np.random.random(self.n)+np.random.random(self.n)*1.j
ft = ifftp()
a = ft(seq)
        b = np.fft.ifft(seq)
        self.assertTrue(np.allclose(a, b))
"""Krylov-subspaces model order reduction techniques
"""
import numpy as np
import scipy.linalg as sclalg
import sharpy.linear.src.libss as libss
import time
import sharpy.utils.settings as settings
import sharpy.utils.cout_utils as cout
import sharpy.utils.rom_interface as rom_interface
import sharpy.utils.h5utils as h5
import sharpy.rom.utils.krylovutils as krylovutils
import warnings as warn
@rom_interface.rom
class Krylov(rom_interface.BaseRom):
"""
Model Order Reduction Methods for Single Input Single Output (SISO) and MIMO
Linear Time-Invariant (LTI) Systems using
moment matching (Krylov Methods).
Examples:
General calling sequences for different systems
SISO single point interpolation:
>>> algorithm = 'one_sided_arnoldi'
>>> interpolation_point = np.array([0.0])
>>> krylov_r = 4
>>>
>>> rom = Krylov()
>>> rom.initialise(sharpy_data, FullOrderModelSS)
>>> rom.run(algorithm, krylov_r, interpolation_point)
2 by 2 MIMO with tangential, multipoint interpolation:
>>> algorithm = 'dual_rational_arnoldi'
>>> interpolation_point = np.array([0.0, 1.0j])
>>> krylov_r = 4
>>> right_vector = np.block([[1, 0], [0, 1]])
>>> left_vector = right_vector
>>>
>>> rom = Krylov()
>>> rom.initialise(sharpy_data, FullOrderModelSS)
>>> rom.run(algorithm, krylov_r, interpolation_point, right_vector, left_vector)
2 by 2 MIMO multipoint interpolation:
>>> algorithm = 'mimo_rational_arnoldi'
>>> interpolation_point = np.array([0.0])
>>> krylov_r = 4
>>>
>>> rom = Krylov()
>>> rom.initialise(sharpy_data, FullOrderModelSS)
>>> rom.run(algorithm, krylov_r, interpolation_point)
"""
rom_id = 'Krylov'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_options = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Write ROM information to screen and log'
settings_types['frequency'] = 'list(complex)'
settings_default['frequency'] = [0]
settings_description['frequency'] = 'Interpolation points in the continuous time complex plane [rad/s]'
settings_types['algorithm'] = 'str'
settings_default['algorithm'] = ''
settings_description['algorithm'] = 'Krylov reduction method algorithm'
settings_types['r'] = 'int'
settings_default['r'] = 1
settings_description['r'] = 'Moments to match at the interpolation points'
settings_types['single_side'] = 'str'
settings_default['single_side'] = ''
settings_description['single_side'] = 'Construct the rom using a single side. Leave blank (or empty string) for both.'
settings_options['single_side'] = ['controllability', 'observability']
settings_types['tangent_input_file'] = 'str'
settings_default['tangent_input_file'] = ''
settings_description['tangent_input_file'] = 'Filepath to .h5 file containing tangent interpolation vectors'
settings_types['restart_arnoldi'] = 'bool'
settings_default['restart_arnoldi'] = False
settings_description['restart_arnoldi'] = 'Restart Arnoldi iteration with r-=1 if ROM is unstable'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description, settings_options)
supported_methods = ('one_sided_arnoldi',
'two_sided_arnoldi',
'dual_rational_arnoldi',
'mimo_rational_arnoldi',
'mimo_block_arnoldi')
def __init__(self):
self.settings = dict()
self.frequency = None
self.algorithm = None
self.ss = None
self.r = 1
self.V = None
self.H = None
self.W = None
self.ssrom = None
self.sstype = None
self.nfreq = None
self.restart_arnoldi = None
self.stable = None
self.cpu_summary = dict()
self.eigenvalue_table = None
def initialise(self, in_settings=None):
if in_settings is not None:
self.settings = in_settings
settings.to_custom_types(self.settings, self.settings_types, self.settings_default, self.settings_options)
try:
if self.settings['print_info']:
cout.cout_wrap('Initialising Krylov Model Order Reduction')
except ValueError:
pass
self.algorithm = self.settings['algorithm']
if self.algorithm not in self.supported_methods:
            raise NotImplementedError('Algorithm %s not recognised, check for spelling or it '
                                      'could be that it is not yet implemented'
                                      % self.algorithm)
self.frequency = np.array(self.settings['frequency'])
self.r = self.settings['r'].value
self.restart_arnoldi = self.settings['restart_arnoldi'].value
try:
self.nfreq = self.frequency.shape[0]
except AttributeError:
self.nfreq = 1
def run(self, ss):
"""
Performs Model Order Reduction employing Krylov space projection methods.
Supported methods include:
========================= ==================== ==========================================================
Algorithm Interpolation Points Systems
========================= ==================== ==========================================================
``one_sided_arnoldi`` 1 SISO Systems
``two_sided_arnoldi`` 1 SISO Systems
``dual_rational_arnoldi`` K SISO systems and Tangential interpolation for MIMO systems
``mimo_rational_arnoldi`` K MIMO systems. Uses vector-wise construction (more robust)
``mimo_block_arnoldi`` K MIMO systems. Uses block Arnoldi methods (more efficient)
========================= ==================== ==========================================================
Args:
ss (sharpy.linear.src.libss.ss): State space to reduce
Returns:
(libss.ss): Reduced state space system
"""
self.ss = ss
if self.settings['print_info']:
cout.cout_wrap('Model Order Reduction in progress...')
self.print_header()
if self.ss.dt is None:
self.sstype = 'ct'
else:
self.sstype = 'dt'
self.frequency = np.exp(self.frequency * ss.dt)
t0 = time.time()
Ar, Br, Cr = self.__getattribute__(self.algorithm)(self.frequency, self.r)
self.ssrom = libss.ss(Ar, Br, Cr, self.ss.D, self.ss.dt)
self.stable = self.check_stability(restart_arnoldi=self.restart_arnoldi)
        if not self.stable:
            warn.warn('Reduced Order Model Unstable')
# Under development
# TL, TR = self.restart()
# Wtr, Vr = self.restart()
# TL, TR = self.stable_realisation()
# self.ssrom = libss.ss(TL.T.dot(Ar.dot(TR)), TL.T.dot(Br), Cr.dot(TR), self.ss.D, self.ss.dt)
# self.ssrom = libss.ss(Wtr.dot(self.ssrom.A.dot(Vr)), Wtr.dot(self.ssrom.B), self.ssrom.C.dot(Vr), self.ss.D, self.ss.dt)
# self.stable = self.check_stability(restart_arnoldi=self.restart_arnoldi)
t_rom = time.time() - t0
self.cpu_summary['run'] = t_rom
if self.settings['print_info']:
cout.cout_wrap('System reduced from order %d to ' % self.ss.states)
cout.cout_wrap('\tn = %d states' % self.ssrom.states, 1)
cout.cout_wrap('...Completed Model Order Reduction in %.2f s' % t_rom)
return self.ssrom
def print_header(self):
cout.cout_wrap('Moment Matching Krylov Model Reduction')
cout.cout_wrap('\tConstruction Algorithm:')
cout.cout_wrap('\t\t%s' % self.algorithm, 1)
cout.cout_wrap('\tInterpolation points:')
if self.frequency.dtype == complex:
cout.cout_wrap(self.nfreq * '\t\tsigma = %4f + %4fj [rad/s]\n' %tuple(self.frequency.view(float)), 1)
else:
cout.cout_wrap(self.nfreq * '\t\tsigma = %4f [rad/s]\n' % self.frequency, 1)
cout.cout_wrap('\tKrylov order:')
cout.cout_wrap('\t\tr = %d' % self.r, 1)
def one_sided_arnoldi(self, frequency, r):
r"""
One-sided Arnoldi method expansion about a single interpolation point, :math:`\sigma`.
The projection matrix :math:`\mathbf{V}` is constructed using an order :math:`r` Krylov space. The space for
a single finite interpolation point known as a Pade approximation is described by:
.. math::
\text{range}(\textbf{V}) = \mathcal{K}_r((\sigma\mathbf{I}_n - \mathbf{A})^{-1},
(\sigma\mathbf{I}_n - \mathbf{A})^{-1}\mathbf{b})
In the case of an interpolation about infinity, the problem is known as partial realisation and the Krylov
space is
.. math::
\text{range}(\textbf{V}) = \mathcal{K}_r(\mathbf{A}, \mathbf{b})
The resulting orthogonal projection leads to the following reduced order system:
.. math::
\hat{\Sigma} : \left(\begin{array}{c|c} \hat{A} & \hat{B} \\
\hline \hat{C} & {D}\end{array}\right)
\text{with } \begin{cases}\hat{A}=V^TAV\in\mathbb{R}^{k\times k},\,\\
\hat{B}=V^TB\in\mathbb{R}^{k\times m},\,\\
\hat{C}=CV\in\mathbb{R}^{p\times k},\,\\
\hat{D}=D\in\mathbb{R}^{p\times m}\end{cases}
Args:
frequency (complex): Interpolation point :math:`\sigma \in \mathbb{C}`
r (int): Number of moments to match. Equivalent to Krylov space order and order of the ROM.
Returns:
tuple: The reduced order model matrices: :math:`\mathbf{A}_r`, :math:`\mathbf{B}_r` and :math:`\mathbf{C}_r`
"""
A = self.ss.A
B = self.ss.B
C = self.ss.C
nx = A.shape[0]
if frequency != np.inf and frequency is not None:
lu_A = krylovutils.lu_factor(frequency, A)
V = krylovutils.construct_krylov(r, lu_A, B, 'Pade', 'b')
else:
V = krylovutils.construct_krylov(r, A, B, 'partial_realisation', 'b')
# Reduced state space model
Ar = V.T.dot(A.dot(V))
Br = V.T.dot(B)
Cr = C.dot(V)
self.V = V
self.W = V
return Ar, Br, Cr
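    # Illustrative call sequence (added; names taken from the class and its
    # settings table): with settings {'algorithm': 'one_sided_arnoldi',
    # 'frequency': [0.0], 'r': 4}, run(ss) dispatches to the method above and
    # matches 4 moments about sigma = 0, returning a reduced model of order 4.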
def two_sided_arnoldi(self, frequency, r):
r"""
Two-sided projection with a single interpolation point following the Arnoldi procedure. Very similar to the
one-sided method available, but it adds the projection :math:`\mathbf{W}` built using the Krylov space for the
:math:`\mathbf{c}` vector:
.. math::
\mathcal{K}_r((\sigma\mathbf{I}_n - \mathbf{A})^{-T},
(\sigma\mathbf{I}_n - \mathbf{A})^{-T}\mathbf{c}^T)\subseteq\mathcal{W}=\text{range}(\mathbf{W})
The oblique projection :math:`\mathbf{VW}^T` matches twice as many moments as the single sided projection.
The resulting system takes the form:
.. math::
\hat{\Sigma} : \left(\begin{array}{c|c} \hat{A} & \hat{B} \\
\hline \hat{C} & {D}\end{array}\right)
\text{with } \begin{cases}\hat{A}=W^TAV\in\mathbb{R}^{k\times k},\,\\
\hat{B}=W^TB\in\mathbb{R}^{k\times m},\,\\
\hat{C}=CV\in\mathbb{R}^{p\times k},\,\\
\hat{D}=D\in\mathbb{R}^{p\times m}\end{cases}
Args:
frequency (complex): Interpolation point :math:`\sigma \in \mathbb{C}`
r (int): Number of moments to match on each side. The resulting ROM will be of order :math:`2r`.
Returns:
tuple: The reduced order model matrices: :math:`\mathbf{A}_r`, :math:`\mathbf{B}_r` and :math:`\mathbf{C}_r`.
"""
A = self.ss.A
B = self.ss.B
C = self.ss.C
nx = A.shape[0]
if frequency != np.inf and frequency is not None:
lu_A = krylovutils.lu_factor(frequency, A)
V = krylovutils.construct_krylov(r, lu_A, B, 'Pade', 'b')
W = krylovutils.construct_krylov(r, lu_A, C.T, 'Pade', 'c')
else:
V = krylovutils.construct_krylov(r, A, B, 'partial_realisation', 'b')
W = krylovutils.construct_krylov(r, A, C.T, 'partial_realisation', 'c')
T = W.T.dot(V)
Tinv = sclalg.inv(T)
reduction_checks(T, Tinv)
self.W = W
self.V = V
# Reduced state space model
Ar = W.T.dot(self.ss.A.dot(V.dot(Tinv)))
Br = W.T.dot(self.ss.B)
Cr = self.ss.C.dot(V.dot(Tinv))
return Ar, Br, Cr
def real_rational_arnoldi(self, frequency, r):
"""
When employing complex frequencies, the projection matrix can be normalised to be real
Following Algorithm 1b in Lee(2006)
Args:
frequency:
r:
Returns:
"""
raise NotImplementedError('Real valued rational Arnoldi Method Work in progress - use mimo_rational_arnoldi')
        ### Not working, having trouble with the last column of H. Need to investigate the background behind the creation of H and see what can be done.
A = self.ss.A
B = self.ss.B
C = self.ss.C
nx = A.shape[0]
nfreq = frequency.shape[0]
# Columns of matrix v
v_ncols = 2 * np.sum(r)
# Output projection matrices
V = np.zeros((nx, v_ncols),
dtype=float)
H = np.zeros((v_ncols, v_ncols),
dtype=float)
res = np.zeros((nx,v_ncols+2),
dtype=float)
# lu_A = krylovutils.lu_factor(frequency[0] * np.eye(nx) - A)
v_res = sclalg.lu_solve(lu_A, B)
H[0, 0] = np.linalg.norm(v_res)
V[:, 0] = v_res.real / H[0, 0]
k = 0
for i in range(nfreq):
for j in range(r[i]):
# k = 2*(i*r[i] + j)
print("i = %g\t j = %g\t k = %g" % (i, j, k))
# res[:, k] = np.imag(v_res)
# if k > 0:
# res[:, k-1] = np.real(v_res)
#
# # Working on the last finished column i.e. k-1 only when k>0
# if k > 0:
# for t in range(k):
# H[t, k-1] = V[:, t].T.dot(res[:, k-1])
# res[:, k-1] -= res[:, k-1] - H[t, k-1] * V[:, t]
#
# H[k, k-1] = np.linalg.norm(res[:, k-1])
# V[:, k] = res[:, k-1] / H[k, k-1]
#
# # Normalise working column k
# for t in range(k+1):
# H[t, k] = V[:, t].T.dot(res[:, k])
# res[:, k] -= H[t, k] * V[:, t]
#
# # Subdiagonal term
# H[k+1, k] = np.linalg.norm(res[:, k])
# V[:, k + 1] = res[:, k] / np.linalg.norm(res[:, k])
#
# if j == r[i] - 1 and i < nfreq - 1:
# lu_A = krylovutils.lu_factor(frequency[i+1] * np.eye(nx) - A)
# v_res = sclalg.lu_solve(lu_A, B)
# else:
# v_res = - sclalg.lu_solve(lu_A, V[:, k+1])
if k == 0:
V[:, 0] = v_res.real / np.linalg.norm(v_res.real)
else:
res[:, k] = np.imag(v_res)
res[:, k-1] = np.real(v_res)
for t in range(k):
H[t, k-1] = np.linalg.norm(res[:, k-1])
res[:, k-1] -= H[t, k-1]*V[:, t]
H[k, k-1] = np.linalg.norm(res[:, k-1])
V[:, k] = res[:, k-1] / H[k, k-1]
if k == 0:
H[0, 0] = V[:, 0].T.dot(v_res.imag)
res[:, 0] -= H[0, 0] * V[:, 0]
else:
for t in range(k+1):
H[t, k] = V[:, t].T.dot(res[:, k])
res[:, k] -= H[t, k] * V[:, t]
H[k+1, k] = np.linalg.norm(res[:, k])
V[:, k+1] = res[:, k] / H[k+1, k]
if j == r[i] - 1 and i < nfreq - 1:
lu_A = krylovutils.lu_factor(frequency[i+1], A)
v_res = sclalg.lu_solve(lu_A, B)
else:
v_res = - sclalg.lu_solve(lu_A, V[:, k+1])
k += 2
# Add last column of H
print(k)
res[:, k-1] = - sclalg.lu_solve(lu_A, V[:, k-1])
for t in range(k-1):
H[t, k-1] = V[:, t].T.dot(res[:, k-1])
res[:, k-1] -= H[t, k-1]*V[:, t]
self.V = V
self.H = H
Ar = V.T.dot(A.dot(V))
Br = V.T.dot(B)
Cr = C.dot(V)
return Ar, Br, Cr
def dual_rational_arnoldi(self, frequency, r):
r"""
        Dual rational Arnoldi interpolation for SISO systems [1] and MIMO systems through tangential interpolation [2].
        Effectively the same as :func:`two_sided_arnoldi`, except that the resulting V matrices for each interpolation
        point are concatenated:
.. math::
\bigcup\limits_{k = 1}^K\mathcal{K}_{b_k}((\sigma_i\mathbf{I}_n - \mathbf{A})^{-1}, (\sigma_i\mathbf{I}_n
- \mathbf{A})^{-1}\mathbf{b})\subseteq\mathcal{V}&=\text{range}(\mathbf{V}) \\
\bigcup\limits_{k = 1}^K\mathcal{K}_{c_k}((\sigma_i\mathbf{I}_n - \mathbf{A})^{-T}, (\sigma_i\mathbf{I}_n
- \mathbf{A})^{-T}\mathbf{c}^T)\subseteq\mathcal{Z}&=\text{range}(\mathbf{Z})
For MIMO systems, tangential interpolation is used through the right and left tangential direction vectors
:math:`\mathbf{r}_i` and :math:`\mathbf{l}_i`.
.. math::
\bigcup\limits_{k = 1}^K\mathcal{K}_{b_k}((\sigma_i\mathbf{I}_n - \mathbf{A})^{-1}, (\sigma_i\mathbf{I}_n
- \mathbf{A})^{-1}\mathbf{Br}_i)\subseteq\mathcal{V}&=\text{range}(\mathbf{V}) \\
\bigcup\limits_{k = 1}^K\mathcal{K}_{c_k}((\sigma_i\mathbf{I}_n - \mathbf{A})^{-T}, (\sigma_i\mathbf{I}_n
- \mathbf{A})^{-T}\mathbf{C}^T\mathbf{l}_i)\subseteq\mathcal{Z}&=\text{range}(\mathbf{Z})
Args:
frequency (np.ndarray): Array containing the interpolation points
:math:`\sigma = \{\sigma_1, \dots, \sigma_K\}\in\mathbb{C}`
r (int): Krylov space order :math:`b_k` and :math:`c_k`. At the moment, different orders for the
controllability and observability constructions are not supported.
right_tangent (np.ndarray): Matrix containing the right tangential direction interpolation vector for
each interpolation point in column form, i.e. :math:`\mathbf{r}\in\mathbb{R}^{m \times K}`.
left_tangent (np.ndarray): Matrix containing the left tangential direction interpolation vector for
each interpolation point in column form, i.e. :math:`\mathbf{l}\in\mathbb{R}^{p \times K}`.
Returns:
tuple: The reduced order model matrices: :math:`\mathbf{A}_r`, :math:`\mathbf{B}_r` and :math:`\mathbf{C}_r`.
References:
[1] Grimme
[2] Gallivan
"""
A = self.ss.A
B = self.ss.B
C = self.ss.C
nx = self.ss.states
nu = self.ss.inputs
ny = self.ss.outputs
B.shape = (nx, nu)
if nu != 1:
left_tangent, right_tangent, rc, ro, fc, fo = self.load_tangent_vectors()
assert right_tangent is not None and left_tangent is not None, 'Missing interpolation vectors for MIMO case'
else:
            fc = np.array(frequency)
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: <NAME>, <NAME>
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
    assert_array_almost_equal(np.sort(exact_dists[0]), np.sort(approx_dists[0]))
import numpy as np
from utils import getMI
class Node():
def __init__(self, nType, idx):
######################################################################################
## description: ##
## the base class of the basic nodes (in/out-put) in LogicNet ##
## parameters: ##
## nType: node type (in/out/lut), also serves as the prefix of the node name ##
## idx: index of the node in the net (2-tuple) ##
## members: ##
## val: a numpy array of 0/1 values of the node after inference ##
## name: the node name ##
## idx: index of the node in the net (2-tuple) ##
## fis: a list of fanin nodes ##
######################################################################################
self.val = None
self.name = nType + str(idx[0]) + '_' + str(idx[1])
self.idx = idx
self.fis = []
# clears all fanins
def disconnectAll(self):
self.fis.clear()
# adds a node to the fanin list
def connect(self, fanin):
self.fis.append(fanin)
# sets the values of the 'input' node in a net
def setVal(self, data):
self.val = data
# returns the values of the node
def getVal(self):
return self.val
# retrieves and sets the values of the an 'output' node from its single fanin node
def eval(self):
assert len(self.fis) == 1
self.val = self.fis[0].getVal()
# returns the node name
def getName(self):
return self.name
# returns the mutual information between the correct labels and the node's values
def getMI(self, labels):
assert self.val is not None
return getMI(self.val, labels)
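# Illustrative wiring example (added; not in the original module): an output
# node simply forwards the value of its single fanin.
# >>> src = Node('in', (0, 0)); dst = Node('out', (1, 0))
# >>> src.setVal(np.array([0, 1, 1], dtype=np.int8)); dst.connect(src)
# >>> dst.eval(); dst.getVal()
# array([0, 1, 1], dtype=int8)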
class LUT(Node):
def __init__(self, idx, k=6):
######################################################################################
## description: ##
## the class of the look-up-tables in LogicNet, inheritance of the "Node" ##
## parameters: ##
## idx: index of the node in the net (2-tuple) ##
## k: number of inputs of the LUT ##
## members: ##
## k: number of inputs of the LUT ##
## arr: the numpy array the stores the function of the LUT ##
######################################################################################
super().__init__('lut', idx)
self.k = k
shp = (2,) * k
self.arr = (np.random.rand(*shp) < 0.5).astype(np.int8)
# the magic method __getitem__ for LUT
# for a lut object n, and a binary k-tuple X=(x_1, ..., x_k), the method returns n(X)
# usage: (2 types of indexing supported)
# 1. u[X] (with tuple) 2. u[x_1*2^(k-1) + ... + x_k] (with int)
def __getitem__(self, key):
if type(key) == int:
return self.arr.flat[key]
elif type(key) == tuple:
return self.arr[key]
else:
print('Illegal indexing!!')
assert False
# the magic method __setitem__ for LUT
# the method set n(X) to the given binary value v
# usage: (2 types of indexing supported)
# 1. u[X] = v 2. u[x_1*2^(k-1) + ... + x_k] = v
def __setitem__(self, key, value):
if type(key) == int:
self.arr.flat[key] = value
elif type(key) == tuple:
self.arr[key] = value
else:
print('Illegal indexing!!')
assert False
def __prepInVal__(self):
inVals = [nd.getVal() for nd in self.fis]
inVals = np.array(inVals, dtype=np.int8).transpose()
return inVals
def __setRandOut__(self, cnt, randId):
for i in randId:
x = 0
for j in range(self.k):
mask = 1 << j
n = i ^ mask
if cnt.flat[n] > 0:
x += 1
elif cnt.flat[n] < 0:
x -= 1
if x > 0:
self[i] = 1
elif x < 0:
self[i] = 0
# trains the LUT to fit the labels
def train(self, labels):
cnt = np.zeros(self.arr.shape, dtype=np.int32)
inVals = self.__prepInVal__()
assert len(inVals) == len(labels)
for inVal, lab in zip(inVals, labels):
inVal = tuple(inVal)
cnt[inVal] += lab * 2 - 1
#if lab == 1: cnt[inVal] += 1
#elif lab == 0: cnt[inVal] -= 1
#else: assert False
randId = []
for i in range(2**self.k):
if cnt.flat[i] > 0:
self[i] = 1
elif cnt.flat[i] < 0:
self[i] = 0
else:
randId.append(i)
self.__setRandOut__(cnt, randId)
self.eval()
# evalutes the values of the LUT during inference
def eval(self):
inVals = self.__prepInVal__()
self.val = np.array([self[tuple(inVal)] for inVal in inVals], dtype=np.int8)
# converts the information of the LUT to strings
def toStrings(self):
names = [fi.getName() for fi in self.fis] + [self.name]
pats = []
fmt = '0{}b'.format(str(self.k))
for i in range(2**self.k):
if self[i] == 1:
pats.append(format(i, fmt) + ' 1')
if len(pats) == 2**self.k: # const 1
names, pats = [self.name], ['1']
elif len(pats) == 0: # const 0
names = [self.name]
return names, pats
# trains the LUT to mimic a neuron according to the input weights
def trainFromNN(self, W, B):
assert len(W) == self.k
sigmoid = lambda x: 1 / (1 + np.exp(-x))
        toBin = lambda x: np.array(list(np.binary_repr(x, self.k)), dtype=np.int8)  # int8 dtype is an assumption, chosen to match self.arr
from pathlib import Path
from timeit import default_timer as timer
import h5py
import numpy as np
import torch
from methods.utils.data_utilities import (_segment_index, load_dcase_format,
to_metrics2020_format)
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from utils.common import int16_samples_to_float32
class UserDataset(Dataset):
""" User defined datset
"""
def __init__(self, args, cfg, dataset, dataset_type='train', overlap=''):
"""
Args:
args: input args
cfg: configurations
dataset: dataset used
dataset_type: 'train' | 'valid' | 'dev_test' | 'eval_test'
overlap: '1' | '2'
"""
super().__init__()
self.dataset_type = dataset_type
self.read_into_mem = args.read_into_mem
self.sample_rate = cfg['data']['sample_rate']
self.clip_length = dataset.clip_length
self.label_resolution = dataset.label_resolution
self.frame_length = int(self.clip_length / self.label_resolution)
self.label_interp_ratio = int(self.label_resolution * self.sample_rate / cfg['data']['hop_length'])
# Chunklen and hoplen and segmentation. Since all of the clips are 60s long, it only segments once here
data = np.zeros((1, self.clip_length * self.sample_rate))
if 'train' in self.dataset_type:
chunklen = int(cfg['data']['train_chunklen_sec'] * self.sample_rate)
hoplen = int(cfg['data']['train_hoplen_sec'] * self.sample_rate)
self.segmented_indexes, self.segmented_pad_width = _segment_index(data, chunklen, hoplen)
elif self.dataset_type in ['valid', 'dev_test', 'eval_test']:
chunklen = int(cfg['data']['test_chunklen_sec'] * self.sample_rate)
hoplen = int(cfg['data']['test_hoplen_sec'] * self.sample_rate)
self.segmented_indexes, self.segmented_pad_width = _segment_index(data, chunklen, hoplen, last_frame_always_paddding=True)
self.num_segments = len(self.segmented_indexes)
# Data and meta path
fold_str_idx = dataset.fold_str_index
ov_str_idx = dataset.ov_str_index
data_sr_folder_name = '{}fs'.format(self.sample_rate)
main_data_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('data').joinpath(data_sr_folder_name)
dev_data_dir = main_data_dir.joinpath('dev').joinpath(cfg['data']['type'])
eval_data_dir = main_data_dir.joinpath('eval').joinpath(cfg['data']['type'])
main_meta_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('meta')
dev_meta_dir = main_meta_dir.joinpath('dev')
eval_meta_dir = main_meta_dir.joinpath('eval')
if self.dataset_type == 'train':
data_dirs = [dev_data_dir]
self.meta_dir = dev_meta_dir
train_fold = [int(fold.strip()) for fold in str(cfg['training']['train_fold']).split(',')]
ov_set = str(cfg['training']['overlap']) if not overlap else overlap
self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5')) \
if int(path.stem[fold_str_idx]) in train_fold and path.stem[ov_str_idx] in ov_set \
and not path.name.startswith('.')]
elif self.dataset_type == 'valid':
if cfg['training']['valid_fold'] != 'eval':
data_dirs = [dev_data_dir]
self.meta_dir = dev_meta_dir
valid_fold = [int(fold.strip()) for fold in str(cfg['training']['valid_fold']).split(',')]
ov_set = str(cfg['training']['overlap']) if not overlap else overlap
self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5')) \
if int(path.stem[fold_str_idx]) in valid_fold and path.stem[ov_str_idx] in ov_set \
and not path.name.startswith('.')]
ori_meta_dir = Path(cfg['dataset_dir']).joinpath('metadata_dev')
else:
data_dirs = [eval_data_dir]
self.meta_dir = eval_meta_dir
ov_set = str(cfg['training']['overlap']) if not overlap else overlap
self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5')) \
if not path.name.startswith('.')]
ori_meta_dir = Path(cfg['dataset_dir']).joinpath('metadata_eval')
frame_begin_index = 0
self.valid_gt_sed_metrics2019 = []
self.valid_gt_doa_metrics2019 = []
self.valid_gt_dcaseformat = {}
for path in self.paths_list:
ori_meta_path = ori_meta_dir.joinpath(path.stem + '.csv')
output_dict, sed_metrics2019, doa_metrics2019 = \
load_dcase_format(ori_meta_path, frame_begin_index=frame_begin_index,
frame_length=self.frame_length, num_classes=len(dataset.label_set))
self.valid_gt_dcaseformat.update(output_dict)
self.valid_gt_sed_metrics2019.append(sed_metrics2019)
self.valid_gt_doa_metrics2019.append(doa_metrics2019)
frame_begin_index += self.frame_length
self.valid_gt_sed_metrics2019 = np.concatenate(self.valid_gt_sed_metrics2019, axis=0)
self.valid_gt_doa_metrics2019 = np.concatenate(self.valid_gt_doa_metrics2019, axis=0)
self.gt_metrics2020_dict = to_metrics2020_format(self.valid_gt_dcaseformat,
self.valid_gt_sed_metrics2019.shape[0], label_resolution=self.label_resolution)
elif self.dataset_type == 'dev_test':
data_dirs = [dev_data_dir]
self.meta_dir = dev_meta_dir
dev_test_fold = [int(fold.strip()) for fold in str(cfg['inference']['test_fold']).split(',')]
ov_set = str(cfg['inference']['overlap']) if not overlap else overlap
self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5')) \
if int(path.stem[fold_str_idx]) in dev_test_fold and path.stem[ov_str_idx] in ov_set \
and not path.name.startswith('.')]
        elif self.dataset_type == 'eval_test':
            data_dirs = [eval_data_dir]
            self.meta_dir = eval_meta_dir
            ov_set = str(cfg['inference']['overlap']) if not overlap else overlap
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5')) \
                if not path.name.startswith('.')]
self.paths_list = [Path(str(path) + '%' + str(n)) for path in self.paths_list for n in range(self.num_segments)]
# Read into memory
if self.read_into_mem:
load_begin_time = timer()
print('Start to load dataset: {}, ov={}......\n'.format(self.dataset_type + ' set', ov_set))
iterator = tqdm(self.paths_list, total=len(self.paths_list), unit='clips')
self.dataset_list = []
for path in iterator:
fn, n_segment = path.stem, int(path.name.split('%')[1])
data_path = Path(str(path).split('%')[0])
index_begin = self.segmented_indexes[n_segment][0]
index_end = self.segmented_indexes[n_segment][1]
pad_width_before = self.segmented_pad_width[n_segment][0]
pad_width_after = self.segmented_pad_width[n_segment][1]
with h5py.File(data_path, 'r') as hf:
x = int16_samples_to_float32(hf['waveform'][:, index_begin: index_end])
pad_width = ((0, 0), (pad_width_before, pad_width_after))
x = np.pad(x, pad_width, mode='constant')
if 'test' not in self.dataset_type:
ov = fn[-1]
index_begin_label = int(index_begin / (self.sample_rate * self.label_resolution))
index_end_label = int(index_end / (self.sample_rate * self.label_resolution))
# pad_width_before_label = int(pad_width_before / (self.sample_rate * self.label_resolution))
pad_width_after_label = int(pad_width_after / (self.sample_rate * self.label_resolution))
meta_path = self.meta_dir.joinpath(fn + '.h5')
with h5py.File(meta_path, 'r') as hf:
sed_label = hf['sed_label'][index_begin_label: index_end_label, ...]
                        doa_label = hf['doa_label'][index_begin_label: index_end_label, ...] # NOTE: this is Cartesian coordinates
if pad_width_after_label != 0:
sed_label_new = np.zeros((pad_width_after_label, 2, 14))
doa_label_new = np.zeros((pad_width_after_label, 2, 3))
sed_label = np.concatenate((sed_label, sed_label_new), axis=0)
doa_label = np.concatenate((doa_label, doa_label_new), axis=0)
self.dataset_list.append({
'filename': fn,
'n_segment': n_segment,
'ov': ov,
'waveform': x,
'sed_label': sed_label,
'doa_label': doa_label
})
else:
self.dataset_list.append({
'filename': fn,
'n_segment': n_segment,
'waveform': x
})
iterator.close()
print('Loading dataset time: {:.3f}\n'.format(timer()-load_begin_time))
def __len__(self):
"""Get length of the dataset
"""
return len(self.paths_list)
def __getitem__(self, idx):
"""
Read features from the dataset
"""
if self.read_into_mem:
data_dict = self.dataset_list[idx]
fn = data_dict['filename']
n_segment = data_dict['n_segment']
x = data_dict['waveform']
if 'test' not in self.dataset_type:
ov = data_dict['ov']
sed_label = data_dict['sed_label']
doa_label = data_dict['doa_label']
else:
path = self.paths_list[idx]
fn, n_segment = path.stem, int(path.name.split('%')[1])
data_path = Path(str(path).split('%')[0])
index_begin = self.segmented_indexes[n_segment][0]
index_end = self.segmented_indexes[n_segment][1]
pad_width_before = self.segmented_pad_width[n_segment][0]
pad_width_after = self.segmented_pad_width[n_segment][1]
with h5py.File(data_path, 'r') as hf:
x = int16_samples_to_float32(hf['waveform'][:, index_begin: index_end])
pad_width = ((0, 0), (pad_width_before, pad_width_after))
x = np.pad(x, pad_width, mode='constant')
if 'test' not in self.dataset_type:
ov = fn[-1]
index_begin_label = int(index_begin / (self.sample_rate * self.label_resolution))
index_end_label = int(index_end / (self.sample_rate * self.label_resolution))
# pad_width_before_label = int(pad_width_before / (self.sample_rate * self.label_resolution))
pad_width_after_label = int(pad_width_after / (self.sample_rate * self.label_resolution))
meta_path = self.meta_dir.joinpath(fn + '.h5')
with h5py.File(meta_path, 'r') as hf:
sed_label = hf['sed_label'][index_begin_label: index_end_label, ...]
                    doa_label = hf['doa_label'][index_begin_label: index_end_label, ...] # NOTE: this is Cartesian coordinates
if pad_width_after_label != 0:
sed_label_new = np.zeros((pad_width_after_label, 2, 14))
doa_label_new = np.zeros((pad_width_after_label, 2, 3))
sed_label = np.concatenate((sed_label, sed_label_new), axis=0)
doa_label = np.concatenate((doa_label, doa_label_new), axis=0)
if 'test' not in self.dataset_type:
sample = {
'filename': fn,
'n_segment': n_segment,
'ov': ov,
'waveform': x,
'sed_label': sed_label,
'doa_label': doa_label
}
else:
sample = {
'filename': fn,
'n_segment': n_segment,
'waveform': x
}
return sample
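# Each item produced above is a dict: 'filename', 'n_segment' and the padded
# 'waveform' are always present; non-test splits additionally carry 'ov',
# 'sed_label' (frames x 2 tracks x 14 classes) and 'doa_label'
# (frames x 2 tracks x 3 Cartesian coordinates).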
class UserBatchSampler(Sampler):
"""User defined batch sampler. Only for train set.
"""
def __init__(self, clip_num, batch_size, seed=2020):
self.clip_num = clip_num
self.batch_size = batch_size
self.random_state = np.random.RandomState(seed)
        self.indexes = np.arange(self.clip_num)
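    # The excerpt ends here; below is a minimal sketch (an assumption, not the
    # original code) of the __iter__ such a train-set sampler typically
    # implements: yield shuffled index batches indefinitely, reshuffling after
    # each full pass over the clips.
    def __iter__(self):
        pointer = 0
        self.random_state.shuffle(self.indexes)
        while True:
            if pointer + self.batch_size > self.clip_num:
                pointer = 0
                self.random_state.shuffle(self.indexes)
            yield self.indexes[pointer: pointer + self.batch_size]
            pointer += self.batch_size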
from rpn.generation import compute_deltas
import numpy as np
def make_corners(regions):
"""Transforms boxes from center format to corners"""
x0, y0, x1, y1 = np.transpose(regions)
ws = x1 - x0 // 2
hs = y1 - y0 // 2
return np.transpose([x0 + ws, y0 + hs, 2 * ws, 2 * hs])
def compute_iou(regions, gt_boxes):
"""IOU metric computation. Differs from rpn version as there is no place for precomputation
Also returns indices instead of respective gt boxes as classes are chosen with their help"""
regions_areas = regions[:, 2] * regions[:, 3]
x0, y0, x1, y1 = make_corners(regions)
ious = []
for gt_box in gt_boxes:
gt_x0, gt_y0, gt_x1, gt_y1 = gt_box
gt_area = (gt_x1 - gt_x0) * (gt_y1 - gt_y0)
int_x0 = np.maximum(gt_x0, x0)
int_y0 = np.maximum(gt_y0, y0)
        int_x1 = np.minimum(gt_x1, x1)
        int_y1 = np.minimum(gt_y1, y1)
        # Non-overlapping boxes produce negative extents; clip them to zero.
        int_area = np.maximum(int_x1 - int_x0, 0) * np.maximum(int_y1 - int_y0, 0)
        ious.append(int_area / (regions_areas + gt_area - int_area))
    ious = np.array(ious)
    # Best IoU per region and the index of the gt box that achieves it.
    return np.max(ious, axis=0), np.argmax(ious, axis=0)
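# Usage sketch with hypothetical boxes: one region in (cx, cy, w, h) format is
# matched against two corner-format gt boxes; the return convention follows the
# reconstruction above (best IoU per region plus the index of the matching gt).
_regions = np.array([[5.0, 5.0, 10.0, 10.0]])   # corners (0, 0, 10, 10)
_gt_boxes = np.array([[0.0, 0.0, 10.0, 10.0],   # identical box -> IoU 1.0
                      [5.0, 5.0, 15.0, 15.0]])  # partial overlap -> IoU 1/7
_best_ious, _best_idx = compute_iou(_regions, _gt_boxes)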
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 <NAME>
pySME is a Python script to run the R SME package
(https://cran.r-project.org/web/packages/sme/index.html). The SME package
generates smoothing-spline mixed-effects models from metabolomics data. This
script follows the methodology given by Berk et al. (2011) and uses
bootstrapping to approximate p-values. Running this script requires R with the
SME package installed.
"""
import os
import numpy as np
from scipy import interpolate
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
import statsmodels.stats.multitest as smm
import time
import copy
import smeutils
smePack = importr('sme', lib_loc="C:/Users/user/Documents/R/win-library/3.6")
statsPack = importr('stats')
# Settings ====================================================================
# Input files
info = pd.read_csv('./sme_info.csv')
data = pd.read_csv('./sme_data.csv')
info_arr = np.array(info)
data_fid = np.array(data.columns)
data_arr = np.array(data)
selIdx = np.arange(len(data_fid))
# Parameters
RUN = True
N = 12 # Number of subjects
t_n = 4 # Number of time points
iplN = 100 # Number of interpolated time points
n_bootstrap = 500 # Number of bootstrap sampling
selIdx = selIdx[:] # List of metabolites to analyze
relative = False # Scale data to initial values
correctOutlier = False
SAVE = False
USEMEAN = True
# SME Parameters
ctra = "AICc" # Criteria
init_l_mc = 1e-8 # Initial lambda_mu
init_l_vc = 1e-8 # Initial lambda_v
init_l_mt = 5e-8 # Initial lambda_mu
init_l_vt = 5e-8 # Initial lambda_v
maxIter = 100000 # Maximum iteration
deltaEM = 1e-3 # Threshold for expectation maximization
deltaNM = 1e-3 # Threshold for Nelder-Mead
normalizeTime = True
seed = 1234 # RNG seed
showFig = False # Flag to plot figures
figSize = (20,16) # Size of figures
plotLegend = False # Flag to plot legend
colorMap = 'viridis' # kwarg for colormap
plotSMEMeanOnly = False # Only plot SME mean trace
mergePlot = True # Merge multiple plots
plotHeatmap = False # Plot heatmap comparing two data groups
t = np.array([1,3,5,7])
iplT = np.linspace(1, 7, iplN)
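# Indices of the original sampling times within the interpolated grid; the
# exact-equality lookup below relies on every t value landing exactly on the
# linspace grid (which holds for iplN = 100 over [1, 7]).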
iplTIdx = np.empty(t_n)
for i in range(t_n):
iplTIdx[i] = np.where(iplT == t[i])[0]
iplTIdx = iplTIdx.astype(int)
sel = np.array([data_fid[selIdx]]).flatten()
#==============================================================================
np.random.seed(seed) # Set seed
#==============================================================================
if relative:
data = smeutils.normalizeData(data, N, t_n, data_fid)
#==============================================================================
t0 = time.time()
fulldataRaw = pd.concat([info,data], axis=1)
fulldataRaw = fulldataRaw.astype('float64')
fulldata = copy.deepcopy(fulldataRaw)
fulldata = fulldata.drop(fulldata.index[16]) # ind 5 has an outlier
if correctOutlier:
fulldata = smeutils.correctOutlier(fulldata, sel, t, t_n)
# Initialize ==================================================================
grp0_f = fulldata[(fulldata.grp == 0)]['ind']
grp1_f = fulldata[(fulldata.grp == 1)]['ind']
grp0 = np.unique(fulldata[(fulldata.grp == 0)]['ind'])
grp1 = np.unique(fulldata[(fulldata.grp == 1)]['ind'])
pandas2ri.activate()
fd_ri = pandas2ri.py2ri(fulldata)
fd_rigrp0 = fd_ri.rx(fd_ri.rx2("grp").ro == 0, True)
fd_rigrp1 = fd_ri.rx(fd_ri.rx2("grp").ro == 1, True)
fd_rigrp0tme = fd_rigrp0.rx2("tme")
fd_rigrp0ind = fd_rigrp0.rx2("ind")
fd_rigrp1tme = fd_rigrp1.rx2("tme")
fd_rigrp1ind = fd_rigrp1.rx2("ind")
ys0mu = np.empty((len(sel), iplN))
ys1mu = np.empty((len(sel), iplN))
ys0vHat = np.empty((len(sel), len(grp0), iplN))
ys1vHat = np.empty((len(sel), len(grp1), iplN))
l2 = np.empty(len(sel))
se = np.empty(len(sel))
se0 = np.empty((len(sel), len(grp0)))
se1 = np.empty((len(sel), len(grp1)))
sem = np.empty(len(sel))
tval = np.empty(len(sel))
ys0v = np.empty((len(sel), len(grp0), t_n))
ys1v = np.empty((len(sel), len(grp1), t_n))
ys0eta = np.empty((len(sel), len(grp0), t_n))
ys1eta = np.empty((len(sel), len(grp1), t_n))
ys0mubs = np.empty((n_bootstrap, len(sel), iplN))
ys1mubs = np.empty((n_bootstrap, len(sel), iplN))
ys0vHatbs = np.empty((n_bootstrap, len(sel), len(grp0), iplN))
ys1vHatbs = np.empty((n_bootstrap, len(sel), len(grp1), iplN))
l2bs = np.empty((n_bootstrap, len(sel)))
sebs = np.empty((n_bootstrap, len(sel)))
se0bs = np.empty((n_bootstrap, len(sel), len(grp0)))
se1bs = np.empty((n_bootstrap, len(sel), len(grp1)))
sembs = np.empty((n_bootstrap, len(sel)))
tvalbs = np.empty((n_bootstrap, len(sel)))
pval = np.empty(len(sel))
t1 = time.time()
print(t1 - t0)
# SME =========================================================================
if RUN:
for m_i in range(len(sel)):
fd_rigrp0obj = fd_rigrp0.rx2(sel[m_i])
fd_rigrp1obj = fd_rigrp1.rx2(sel[m_i])
fit0 = smePack.sme(fd_rigrp0obj,
fd_rigrp0tme,
fd_rigrp0ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mc,
                           initial_lambda_v=init_l_vc,
normalizeTime=normalizeTime)
fit1 = smePack.sme(fd_rigrp1obj,
fd_rigrp1tme,
fd_rigrp1ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mt,
initial_lambda_v=init_l_vt,
normalizeTime=normalizeTime)
fit0coef = np.array(fit0.rx2('coefficients'))
fit1coef = np.array(fit1.rx2('coefficients'))
spl0mu = interpolate.CubicSpline(t, fit0coef[0], bc_type='natural')
ys0mu[m_i] = spl0mu(iplT)
spl1mu = interpolate.CubicSpline(t, fit1coef[0], bc_type='natural')
ys1mu[m_i] = spl1mu(iplT)
l2[m_i] = np.sqrt(np.trapz(np.square(ys0mu[m_i] - ys1mu[m_i]), x=iplT))
for g0 in range(len(grp0)):
spl0 = interpolate.CubicSpline(t, fit0coef[g0 + 1] + fit0coef[0], bc_type='natural')
ys0vHat[m_i][g0] = spl0(iplT)
ys0v[m_i][g0] = ys0mu[m_i][iplTIdx] - ys0vHat[m_i][g0][iplTIdx]
ys0eta[m_i][g0] = fulldataRaw.loc[fulldataRaw.ind == grp0[g0], sel[m_i]] - ys0vHat[m_i][g0][iplTIdx]
se0[m_i][g0] = np.trapz(np.square(ys0mu[m_i] - ys0vHat[m_i][g0]), x=iplT)
for g1 in range(len(grp1)):
spl1 = interpolate.CubicSpline(t, fit1coef[g1 + 1] + fit1coef[0], bc_type='natural')
ys1vHat[m_i][g1] = spl1(iplT)
ys1v[m_i][g1] = ys1mu[m_i][iplTIdx] - ys1vHat[m_i][g1][iplTIdx]
ys1eta[m_i][g1] = fulldataRaw.loc[fulldataRaw.ind == grp1[g1], sel[m_i]] - ys1vHat[m_i][g1][iplTIdx]
se1[m_i][g1] = np.trapz(np.square(ys1mu[m_i] - ys1vHat[m_i][g1]), x=iplT)
se[m_i] = np.sqrt(np.mean(se0[m_i])/len(grp0) + np.mean(se1[m_i])/len(grp1))
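    # Test statistic per Berk et al. (2011): the L2 distance between the two
    # group mean curves divided by the combined standard error; sem (a separate
    # measurement-error term) is fixed to 0 below, overwriting the array
    # allocated earlier.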
sem = 0.
tval = np.divide(l2, se + sem)
ys0vFlat = ys0v.reshape((ys0v.shape[0], -1))
ys0etaFlat = ys0eta.reshape((ys0eta.shape[0], -1))
ys0etaFlat = np.delete(ys0etaFlat, 13, 1) # ind 5 has an outlier
ys1vFlat = ys1v.reshape((ys1v.shape[0], -1))
ys1etaFlat = ys1eta.reshape((ys1eta.shape[0], -1))
t2 = time.time()
print(t2 - t1)
# Bootstrapping ===============================================================
fulldataS = []
for bcount in range(n_bootstrap):
print("Bootstrap run: " + str(bcount))
fulldataC = copy.deepcopy(fulldataRaw)
for m_i in range(len(sel)):
if USEMEAN:
for Di in range(N):
ysmuMean = (ys0mu[m_i][iplTIdx] + ys1mu[m_i][iplTIdx])/2
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ysmuMean
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ysmuMean
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
else:
ct_rand = np.random.rand()
for Di in range(N):
if ct_rand < 0.5:
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys0mu[m_i][iplTIdx]
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys0mu[m_i][iplTIdx]
                                                + np.random.choice(ys1vFlat[m_i], size=t_n)
                                                + np.random.choice(ys1etaFlat[m_i], size=t_n))
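# The excerpt ends mid-bootstrap. Once tvalbs is filled, the bootstrap p-value
# per metabolite is typically the fraction of bootstrap statistics at or above
# the observed one, e.g. (a sketch of the standard estimator, not necessarily
# the original code):
#     pval = (tvalbs >= tval).mean(axis=0)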
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
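# Usage sketch (illustrative, not part of the generated data): for P 1 21 1 the
# screw axis maps (h, k, l) to (-h, k, -l), and its translation (0, 1/2, 0)
# contributes a phase factor exp(-2*pi*i*k/2), i.e. -1 for odd k.
_hkls, _phases = space_groups['P 1 21 1'].symmetryEquivalentMillerIndices(
    N.array([1, 1, 1]))
# _hkls -> [[1, 1, 1], [-1, 1, -1]];  _phases -> [1.+0.j, -1.+0.j]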
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
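
# Space group 43: F d d 2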
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
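
# Space group 44: I m m 2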
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
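
# Space group 45: I b a 2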
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
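
# Space group 46: I m a 2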
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
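
# Space group 47: P m m m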
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
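
# Space group 48: P n n n :2 (origin choice 2)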
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
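
# Space group 49: P c c m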
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
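
# Space group 50: P b a n :2 (origin choice 2)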
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
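
# Space group 51: P m m a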
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
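
# Space group 52: P n n a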
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
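
# Space group 53: P m n a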
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
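
# Space group 54: P c c a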
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
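
# Space group 55: P b a m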
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
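
# Space group 56: P c c n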
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
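
# Space group 57: P b c m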
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
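
# Space group 58: P n n m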
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
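
# Space group 59: P m m n :2 (origin choice 2)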
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
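
# Space group 60: P b c n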
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
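
# Space group 61: P b c a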
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
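
# Space group 62: P n m a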
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
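
# Space group 63: C m c m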
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
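
# Space group 64: C m c a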
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
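
# Space group 65: C m m m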
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
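
# Space group 66: C c c m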
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
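
# Space group 67: C m m a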
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
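
# Space group 68: C c c a :2 (origin choice 2)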
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
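
# Space group 69: F m m m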
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
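
# Space group 70: F d d d :2 (origin choice 2)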
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
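# Sketch of why F d d d carries 32 tuples: each of the 8 primitive
# operations is repeated once per face-centring vector (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).  The count check below uses
# only the local `transformations` list built above.
assert len(transformations) == 32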
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
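# Minimal sketch of how a (rot, trans_num, trans_den) tuple acts on a
# fractional coordinate.  _apply_symop is a hypothetical helper, unused by
# the rest of this module; it assumes only that N (imported at the top of
# the file) provides array/dot in the usual Numeric/NumPy sense.
def _apply_symop(rot, trans_num, trans_den, point):
    """Return rot*point + trans_num/trans_den, componentwise."""
    rotated = N.dot(rot, point)
    return [rotated[i] + float(trans_num[i]) / trans_den[i] for i in range(3)]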
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
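# Sketch: the 4_1 screw (second tuple of P 41) applied four times returns a
# point to its own x, y and advances z by one full cell.  Reuses the
# hypothetical _apply_symop helper sketched after P 4; illustrative only.
_rot, _num, _den = transformations[1]
_p = [0.1, 0.2, 0.3]
for _ in range(4):
    _p = _apply_symop(_rot, _num, _den, _p)
# _p is now [0.1, 0.2, 1.3], i.e. the start point translated by (0, 0, 1).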
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
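# P 43 is the enantiomorph of P 41: the rotation parts are identical and
# the screw translation is 3/4 instead of 1/4.  Quick check on the tuple
# built above (trans_num[2]/trans_den[2] of the second operation is 3/4).
assert transformations[1][1][2] == 3 and transformations[1][2][2] == 4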
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
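# Sketch: the rotoinversion -4 (second tuple of P -4) squares to the
# twofold axis 2z (fourth tuple).  Assumes the arrays expose the usual
# Numeric/NumPy tolist(); a consistency check, not production code.
_s4 = transformations[1][0]
assert N.dot(_s4, _s4).tolist() == transformations[3][0].tolist()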
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
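# P 4/m is centrosymmetric: for every rotation part R in its list, -R is
# present as well.  Minimal sanity check over the tuples built above.
_rots = [r.tolist() for r, n, d in transformations]
for _r in _rots:
    assert [[-x for x in row] for row in _r] in _rots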
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
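# The generated translations are not reduced modulo the lattice, which is
# why entries such as (0, -1/2, -1/2) appear above.  A reduction sketch
# (hypothetical helper, unused elsewhere in this module):
def _reduce_translation(trans_num, trans_den):
    """Map each component n/d into the half-open interval [0, 1)."""
    return [(float(n) / d) % 1.0 for n, d in zip(trans_num, trans_den)]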
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
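# Composing two operations (R1, t1) and (R2, t2) yields (R1.R2, R1.t2 + t1).
# Sketch below with plain-float translations; hypothetical helper, shown
# here only to document the convention the tuples follow.
def _compose(op1, op2):
    r1, n1, d1 = op1
    r2, n2, d2 = op2
    t1 = [float(n) / d for n, d in zip(n1, d1)]
    t2 = [float(n) / d for n, d in zip(n2, d2)]
    rotated_t2 = N.dot(r1, t2)
    return N.dot(r1, r2), [rotated_t2[i] + t1[i] for i in range(3)]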
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
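# Sketch: the orbit of a general fractional position under the eight
# operations of P 41 21 2 (real use would also reduce duplicates modulo
# the lattice).  Reuses the hypothetical _apply_symop helper.
_orbit = [_apply_symop(r, n, d, [0.1, 0.2, 0.3]) for r, n, d in transformations]
assert len(_orbit) == 8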
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
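# I-centred groups list each primitive operation twice, once as generated
# and once shifted by (1/2, 1/2, 1/2); hence 16 tuples for I 4 2 2.
assert len(transformations) == 16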
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
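# Sketch: the mirror diag(-1,1,1) with zero translation (fifth tuple of
# P 4 m m) fixes any point in the plane x = 0; illustrative check via the
# hypothetical _apply_symop helper.
_m = transformations[4]
assert _apply_symop(_m[0], _m[1], _m[2], [0.0, 0.2, 0.3]) == [0.0, 0.2, 0.3]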
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
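# --- Editorial note (a sketch, not part of the generated table) ----------
# The body-centred groups (I ...) above list the (0,0,0)+ operations
# followed by the same operations shifted by the centring vector
# (1/2,1/2,1/2). The shifted translations are stored unreduced: e.g.
# trans_num/trans_den of [1,1,5]/[1,2,4] encodes (1, 1/2, 5/4), which is
# equivalent to (0, 1/2, 1/4) modulo full lattice translations. A reduced
# fractional translation can be recovered with, e.g.:
#
#     t = 1.0 * trans_num / trans_den
#     t_reduced = t - N.floor(t)    # maps 5/4 -> 1/4 and -1/2 -> 1/2
# --------------------------------------------------------------------------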
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
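# --- Editorial note (a sketch, not part of the generated table) ----------
# In the P -4 ... groups starting here, the second and third operations
# (rot [0,1,0,-1,0,0,0,0,-1] and its inverse) are the roto-inversion -4:
# the fourfold rotation [0,-1,0,1,0,0,0,0,1] composed with the inversion
# -1, i.e. (x, y, z) -> (y, -x, -z).
# --------------------------------------------------------------------------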
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
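# --- Editorial note (a sketch, not part of the generated table) ----------
# P 4/m m m (No. 123) and the other centrosymmetric groups that follow
# list the inversion-related half explicitly: operations 9-16 are
# operations 1-8 composed with -1 (rot negated and, where present, the
# translation negated as well). This is why later groups such as
# P 4/m c c carry negative trans_num entries like [0,0,-1].
# --------------------------------------------------------------------------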
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
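# --- Editorial note (a sketch, not part of the generated table) ----------
# The ':2' suffix in 'P 4/n b m :2' denotes origin choice 2 of the
# International Tables setting (origin at an inversion centre). Every
# group is registered under both its IT number and its Hermann-Mauguin
# symbol, so the same SpaceGroup instance can be looked up either way:
#
#     assert space_groups[125] is space_groups['P 4/n b m :2']
# --------------------------------------------------------------------------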
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
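# Space group 130: P 4/n c c :2, 16 operations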
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
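# Space group 131: P 42/m m c, 16 operations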
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
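# Space group 132: P 42/m c m, 16 operations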
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
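# Space group 133: P 42/n b c :2, 16 operations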
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
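# Space group 134: P 42/n n m :2, 16 operations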
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
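# Space group 135: P 42/m b c, 16 operations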
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
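# Space group 136: P 42/m n m, 16 operations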
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
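# Space group 137: P 42/n m c :2, 16 operations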
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
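# Space group 138: P 42/n c m :2, 16 operations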
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
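# Space group 139: I 4/m m m, 32 operations (16 repeated with the (1/2,1/2,1/2) body-centering shift)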
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
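# Space group 140: I 4/m c m, 32 operations (body-centered)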
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
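# Space group 141: I 41/a m d :2, 32 operations (body-centered)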
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
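# Space group 142: I 41/a c d :2, 32 operations (body-centered)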
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
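# Space group 143: P 3, 3 operations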
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
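# Space group 144: P 31, 3 operations (31 screw axis: translations 0, 1/3, 2/3 along c)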
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
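# Space group 145: P 32, 3 operations (32 screw axis: translations 0, 2/3, 1/3 along c)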
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
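# Space group 146: R 3 :H (hexagonal axes), 9 operations (3 repeated with the (1/3,2/3,2/3) and (2/3,1/3,1/3) centering shifts)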
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
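# Space group 147: P -3, 6 operations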
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
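# Space group 148: R -3 :H (hexagonal axes), 18 operations (6 repeated with the rhombohedral centering shifts)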
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
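# Space group 149: P 3 1 2, 6 operations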
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
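# Space group 150: P 3 2 1, 6 operations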
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
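# Space group 151: P 31 1 2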
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
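# Space group 152: P 31 2 1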
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
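# Space group 153: P 32 1 2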
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
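# Space group 154: P 32 2 1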
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
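# Space group 155: R 3 2, hexagonal axes (the six ops are repeated with the
# R-centering translations (1/3,2/3,2/3) and (2/3,1/3,1/3))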
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
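# Space group 156: P 3 m 1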
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
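# Space group 157: P 3 1 m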
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
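# Space group 158: P 3 c 1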
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
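# Space group 159: P 3 1 c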
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
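# Space group 160: R 3 m, hexagonal axes (ops repeated with the R-centering
# translations (1/3,2/3,2/3) and (2/3,1/3,1/3))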
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
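# Space group 161: R 3 c, hexagonal axes (the c-glide z+1/2 combined with the
# R-centering translations yields the 7/6 and 5/6 z-components below)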
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
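# Space group 162: P -3 1 m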
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
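# Space group 163: P -3 1 c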
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
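# Space group 164: P -3 m 1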
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
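# Space group 165: P -3 c 1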
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
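# Space group 166: R -3 m, hexagonal axes (ops repeated with the R-centering
# translations (1/3,2/3,2/3) and (2/3,1/3,1/3))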
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
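# Space group 167: R -3 c, hexagonal axes (c-glide plus R-centering; note the
# mixed /6 z-components in the centered copies below)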
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
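# Space group 168: P 6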
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
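# Space group 169: P 61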
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
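# Space group 170: P 65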
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
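# Space group 171: P 62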
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
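# Space group 172: P 64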
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
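# Space group 173: P 63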
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
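# Space group 174: P -6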
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
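# Space group 175: P 6/m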
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
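# Space group 176: P 63/m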
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
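# Space group 177: P 6 2 2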
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
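# Space group 178: P 61 2 2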
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
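# Space group 179: P 65 2 2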
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
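# No. 180, P 62 2 2: 6_2 screw axis with c-translations in thirds.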
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
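# No. 181, P 64 2 2: 6_4 screw; enantiomorphic partner of P 62 2 2.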
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
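# No. 182, P 63 2 2: 6_3 screw axis; every nonzero translation is c/2.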
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
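# No. 183, P 6 m m: twelve operations, all translation-free.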
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
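# No. 184, P 6 c c: the six mirror-type operations carry a c/2 glide component.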
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
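# No. 185, P 63 c m: 6_3 screw plus c-glide components of c/2.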
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
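# No. 186, P 63 m c: as P 63 c m but with mirrors and c-glides interchanged.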
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
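# No. 187, P -6 m 2: twelve translation-free operations built on the -6 axis.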
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
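# No. 188, P -6 c 2: c/2 glide components on half of the twelve operations.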
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
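# No. 189, P -6 2 m: twelve translation-free operations.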
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
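# No. 190, P -6 2 c: like P -6 2 m but with c/2 glide components.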
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
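# No. 191, P 6/m m m: the hexagonal holohedry; 24 operations, all translation-free.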
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
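# No. 192, P 6/m c c: 24 operations with +/-c/2 glide components
# (the generated translations are left unreduced mod 1).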
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
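# No. 193, P 63/m c m: 24 operations mixing c/2 screw and glide translations.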
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
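# No. 194, P 63/m m c: the last hexagonal group (24 operations).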
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
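# Cubic system from here on. No. 195, P 2 3: the twelve proper rotations
# of point group 23.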
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
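# No. 196, F 2 3: the twelve 23 rotations repeated with the face-centering
# translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) -- 48 entries in all.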
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
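# No. 197, I 2 3: the 23 rotations plus their body-centered (1/2,1/2,1/2)
# copies -- 24 entries.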
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
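# No. 198, P 21 3: 2_1 screw axes; each nontrivial translation combines
# half-steps along two of the three axes.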
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
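# No. 199, I 21 3: 2_1 screw operations plus their body-centered copies.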
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
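# Space group 200, 'P m -3' (primitive cubic, centrosymmetric): 24 operations.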
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
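# Space group 201, 'P n -3 :2' (origin choice 2): 24 operations. Translations
# are stored unreduced here (negative numerators appear for the
# inversion-related operators); consumers presumably wrap them into [0, 1).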
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
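# Space group 202, 'F m -3' (face-centred cubic): the 24 point operations
# repeated for each F-centring shift (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0), giving 96 operations in total.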
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
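# Space group 203, 'F d -3 :2' (face-centred cubic, origin choice 2):
# 96 operations, 24 per F-centring shift, again with unreduced translations.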
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
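# Space group 204, 'I m -3' (body-centred cubic): 24 primitive operations
# plus their (1/2, 1/2, 1/2)-shifted copies, 48 in total.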
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
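# Space group 205, 'P a -3' (primitive cubic): 24 operations.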
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
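# Next block: 24 primitive operations followed by their body-centred copies.
# By sequence this should be space group 206 ('I a -3'); its SpaceGroup
# registration lies beyond this excerpt.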
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
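# A minimal standalone sketch, assuming the usual convention that each
# (rot, trans_num, trans_den) tuple above encodes the symmetry operation
# x' = rot @ x + trans_num / trans_den, wrapped back into the unit cell:
import numpy as np

def apply_symop(rot, trans_num, trans_den, xyz):
    # rot is a 3x3 integer matrix; the translation is a vector of fractions
    return (rot @ np.asarray(xyz, dtype=float) + trans_num / trans_den) % 1.0

rot = np.array([0, -1, 0, 1, 0, 0, 0, 0, 1]).reshape(3, 3)
print(apply_symop(rot, np.array([1, 1, 1]), np.array([4, 4, 4]), [0.1, 0.2, 0.3]))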
from __future__ import print_function, absolute_import
import numpy as np
import vtk
from collections import defaultdict
from itertools import chain
from six import itervalues
from vtk.util.numpy_support import numpy_to_vtk
def _basic_elem(elem, cell_type, ugrid, nid_dict):
nids = [nid_dict[nid] for nid in elem.node_ids]
ids = vtk.vtkIdList()
ids.SetNumberOfIds(len(nids))
for i in range(len(nids)):
ids.SetId(i, nids[i])
ugrid.InsertNextCell(cell_type, ids)
return elem.eid
def _grid(grid, cell_type, ugrid, nid_dict):
nids = [nid_dict[nid] for nid in [grid.nid]]
ids = vtk.vtkIdList()
ids.SetNumberOfIds(len(nids))
for i in range(len(nids)):
ids.SetId(i, nids[i])
ugrid.InsertNextCell(cell_type, ids)
return grid.nid
def _rbe2(elem, cell_type, ugrid, nid_dict):
independent = elem.gn
dependent = elem.Gmi
nids = []
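# pair the independent grid with each dependent grid so the single
# VTK_POLY_LINE traces a spoke out to every dependent node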
for i in range(len(dependent)):
nids.append(nid_dict[independent])
nids.append(nid_dict[dependent[i]])
ids = vtk.vtkIdList()
ids.SetNumberOfIds(len(nids))
for i in range(len(nids)):
ids.SetId(i, nids[i])
ugrid.InsertNextCell(cell_type, ids)
return elem.eid
def _rbe3(elem, cell_type, ugrid, nid_dict):
independent = elem.refgrid
_dependent = elem.Gijs
dependent = []
for dep in _dependent:
if isinstance(dep, list):
dependent.extend(dep)
else:
dependent.append(dep)
nids = []
for i in range(len(dependent)):
nids.append(nid_dict[independent])
nids.append(nid_dict[dependent[i]])
ids = vtk.vtkIdList()
ids.SetNumberOfIds(len(nids))
for i in range(len(nids)):
ids.SetId(i, nids[i])
ugrid.InsertNextCell(cell_type, ids)
return elem.eid
_category_list = ['grid', 'element', 'mpc', 'force', 'disp', 'cord']
_nastran_to_vtk = {
'GRID': (vtk.VTK_VERTEX, _grid, 'grid'),
'CQUAD4': (vtk.VTK_QUAD, _basic_elem, 'element'),
'CTRIA3': (vtk.VTK_TRIANGLE, _basic_elem, 'element'),
'CBEAM': (vtk.VTK_LINE, _basic_elem, 'element'),
'CBUSH': (vtk.VTK_LINE, _basic_elem, 'element'),
'CBAR': (vtk.VTK_LINE, _basic_elem, 'element'),
'RBE2': (vtk.VTK_POLY_LINE, _rbe2, 'mpc'),
'RBE3': (vtk.VTK_POLY_LINE, _rbe3, 'mpc')
}
_card_types = {}
i = 0
for key in sorted(_nastran_to_vtk.keys()):
_card_types[key] = i
i += 1
class UGrid(vtk.vtkUnstructuredGrid):
category_list = _category_list
category_dict = {_category_list[i]: i for i in range(len(_category_list))}
nastran_to_vtk = _nastran_to_vtk
card_types = _card_types
@classmethod
def make_from_bdf(cls, bdf):
# type: (BDF) -> UGrid
nid_list = []
nid_dict = {}
node_pos = np.empty((len(bdf.nodes), 3), dtype=np.float64)
i = 0
for node in itervalues(bdf.nodes):
node_pos[i] = node.get_position()
nid = node.nid
nid_list.append(nid)
nid_dict[nid] = i
i += 1
_points = vtk.vtkPoints()
_points.SetData(numpy_to_vtk(node_pos))
points = vtk.vtkPoints()
points.DeepCopy(_points)
cells = []
cell_types = []
cell_count = 0
elem_types = []
eid_list = []
_nastran_to_vtk = cls.nastran_to_vtk
bdf_data_to_plot = chain(
itervalues(bdf.nodes),
itervalues(bdf.elements),
itervalues(bdf.rigid_elements)
)
category_list = []
category_dict = cls.category_dict
ugrid = vtk.vtkUnstructuredGrid()
ugrid.SetPoints(points)
for elem in bdf_data_to_plot:
elem_type = elem.type
cell_type, add_method, category = _nastran_to_vtk.get(elem_type, (None, None, None))
if cell_type is None:
continue
cell_types.append(cell_type)
elem_types.append(elem_type)
eid = add_method(elem, cell_type, ugrid, nid_dict) # returns element/grid id
eid_list.append(eid)
category_list.append(category_dict[category])
cell_count += 1
id_array = numpy_to_vtk(np.array(cells, dtype=np.int64), deep=1, array_type=vtk.VTK_ID_TYPE)
vtk_cells = vtk.vtkCellArray()
vtk_cells.SetCells(cell_count, id_array)
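# note: 'cells' is never populated above (cells go straight into ugrid via
# InsertNextCell), so this vtkCellArray stays empty and is never attached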
cell_types = np.array(cell_types, 'B')
vtk_cell_types = numpy_to_vtk(cell_types)
cell_locations = np.arange(cell_count)
vtk_cell_locations = numpy_to_vtk(cell_locations, deep=1, array_type=vtk.VTK_ID_TYPE)
vtk_cell_locations.SetName('index')
ugrid.GetCellData().AddArray(vtk_cell_locations)
_elem_types = vtk.vtkIdTypeArray()
_elem_types.SetName('element_type')
_elem_types.SetNumberOfValues(len(elem_types))
card_types = cls.card_types
for i in range(len(elem_types)):
_elem_types.SetValue(i, card_types[elem_types[i]])
ugrid.GetCellData().AddArray(_elem_types)
_cat = numpy_to_vtk(np.array(category_list, dtype=np.int64), deep=1, array_type=vtk.VTK_ID_TYPE)
_cat.SetName('category')
ugrid.GetCellData().AddArray(_cat)
id_array = numpy_to_vtk(np.array(eid_list, dtype=np.int64), deep=1, array_type=vtk.VTK_ID_TYPE)
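# Hypothetical usage sketch (kept in comments; assumes pyNastran's BDF reader,
# which is not imported in this snippet):
#
#     from pyNastran.bdf.bdf import BDF
#     model = BDF()
#     model.read_bdf('model.bdf')
#     ugrid = UGrid.make_from_bdf(model)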
"""
Independent Model
Ensemble of binary classifiers with resulting probabilities normalized across all classes. Assumes all classes are independent and are not related by any class hierarchy.
"""
import numpy as np
from mainmodel.mainmodel import MainModel
from classifiers.binary.optbinary import OptimalBinaryClassifier
class OvAModel(MainModel):
def __init__(self, **data_args):
"""
Initialize Independent Model - call init method of parent class
"""
self.name = "OVA Classifier"
# Use output of Binary Classifier model for OvAModel - just need to
# normalize those probabilities.
if 'init_from_binary' in data_args.keys():
self.init_model = data_args['init_from_binary']
else:
self.init_model = None
super(OvAModel, self).__init__(**data_args)
def run_cfv(self, start_time):
"""
Override MainModel function to check whether results from a BinaryModel were passed in
"""
if self.init_model is not None:
# Normalize probabilities per row (except the last column, which is the label)
print("Using Binary Model results.")
self.class_labels = self.init_model.class_labels
self.class_counts = self.init_model.class_counts
self.num_folds = self.init_model.num_folds
self.X = self.init_model.X
self.y = self.init_model.y
self.results = self.init_model.results
a = np.copy(self.init_model.results)
new_results = []
for index in range(self.init_model.num_folds):
trial_results = []
for row_index, row in enumerate(a[index]):
num_classes = len(self.init_model.class_labels)
prob_sum = 0 # compute normalizing sum
for class_index in range(num_classes):
prob_sum += row[class_index]
new_probs = []
for class_index in range(num_classes):
new_probs.append(row[class_index] / prob_sum)
new_probs.append(row[num_classes]) # Add label
trial_results.append(new_probs)
new_results.append(np.array(trial_results, dtype='object'))
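# A minimal, self-contained sketch of the per-row normalization implemented
# above, vectorized with numpy (the toy 'probs' array and its layout -- class
# probabilities followed by a label column -- are assumptions for illustration):
import numpy as np

probs = np.array([[0.2, 0.5, 0.1, 2.0],
                  [0.3, 0.3, 0.3, 0.0]])   # 3 class probabilities + label
class_probs = probs[:, :-1]
normalized = class_probs / class_probs.sum(axis=1, keepdims=True)
probs_normalized = np.hstack([normalized, probs[:, -1:]])  # re-attach labels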
"""
Created on April, 2019
@author: <NAME>
Toolkit functions used for processing training data.
Cite:
<NAME>, et al. "Cooperative Holistic Scene Understanding: Unifying 3D Object, Layout,
and Camera Pose Estimation." Advances in Neural Information Processing Systems. 2018.
"""
import numpy as np
from scipy.spatial import ConvexHull
import re
import cv2
import pickle
import json
from copy import deepcopy
def sample_pnts_from_obj(data, n_pnts = 5000, mode = 'uniform'):
# sample points on each object mesh.
flags = data.keys()
all_pnts = data['v'][:,:3]
area_list = np.array(calculate_face_area(data))
distribution = area_list/np.sum(area_list)
# sample points; the probability of sampling a face is proportional to its area
new_pnts = []
if mode == 'random':
random_face_ids = np.random.choice(len(data['f']), n_pnts, replace=True, p=distribution)
random_face_ids, sample_counts = np.unique(random_face_ids, return_counts=True)
for face_id, sample_count in zip(random_face_ids, sample_counts):
face = data['f'][face_id]
vid_in_face = [int(item.split('/')[0]) for item in face]
weights = np.diff(np.sort(np.vstack(
[np.zeros((1, sample_count)), np.random.uniform(0, 1, size=(len(vid_in_face) - 1, sample_count)),
np.ones((1, sample_count))]), axis=0), axis=0)
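# Standalone sketch of the sampling idea used above: sorted Uniform(0,1) draws
# padded with 0 and 1, then differenced, give weights that are uniformly
# distributed on the simplex -- i.e. unbiased barycentric coordinates.
# (Triangle-only toy version; the function name and setup are illustrative.)
import numpy as np

def sample_on_triangle(verts, n):
    # verts: (3, 3) array of triangle vertices; returns (n, 3) sampled points
    u = np.sort(np.vstack([np.zeros((1, n)),
                           np.random.uniform(0, 1, size=(2, n)),
                           np.ones((1, n))]), axis=0)
    w = np.diff(u, axis=0)   # (3, n) barycentric weights, columns sum to 1
    return w.T @ verts

pts = sample_on_triangle(np.eye(3), 100)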
#######
## 1. Dicom to PNG
# In[]
import SimpleITK as sitk
import os
from skimage import transform,exposure,io
import numpy as np
import cv2
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def windowing(img, center, width):
low = center - width/2
high = center + width/2
img = (img - low)/width
img[img<0.]=0.
img[img>1.]=1.
return img
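# Example (illustrative values, not from this pipeline): a typical CT
# soft-tissue window would be img_win = windowing(img, center=40, width=400);
# the pipeline's own windowing call further down is commented out.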
def dicom2png(src_Path, dst_Path, imshape):
#print(src_Path)
if(not os.path.isdir(src_Path)):
return
if(not os.path.isdir(dst_Path)):
return
for filename in os.listdir(src_Path):
if(filename[-4:] != ".dcm"):
continue
filepath = src_Path + "/" + filename
if(os.path.isdir(filepath)):
continue
print(filepath)
img = sitk.ReadImage(filepath)
img = sitk.GetArrayFromImage(img).astype("int16")
shape = img.shape
img = exposure.equalize_hist(img)
img = img[0,:,:]
if(imshape is not None):
img = transform.resize(img, imshape)
#
#img = windowing(img, 8000, 3000)
img = img*255
img = np.asarray(img, dtype = "int16")
img = np.expand_dims(img, -1)
cv2.imwrite(dst_Path +"/"+ filename[:-4]+"_" + str(shape[1]) + "_" + str(shape[2]) +".png", img)
folder = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_0_Original"
dst = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_1_BasicData"
imshape = (2048,2048)
for lower in os.listdir(folder) :
if (not os.path.isdir(folder + "/" + lower)):
continue
if( not os.path.isdir(dst + "/" + lower)):
os.mkdir(dst+ "/" + lower)
dicom2png( folder + "/" + lower, dst+ "/" + lower, imshape)
###################################################################################
## 2. Lung Mask
# In[]
##################################################################################
#################################################################################
## 3. Lung Mask Crop
# In[]
import cv2
import numpy as np
import os
from operator import eq
import random
import matplotlib.pyplot as plt
from skimage import io
import shutil
import csv
os.environ["CUDA_VISIBLE_DEVICES"]="0"
folder = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_1_BasicData/Imgs"
dst = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_2_ImgCropped"
LungMaskPath = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_1_BasicData/Masks"
csvfile = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_2_ImgCropped/metadata.csv"
for lower in os.listdir(folder) :
if (not os.path.isdir(folder + "/" + lower)):
continue
if( not os.path.isdir(dst + "/" + lower)):
os.mkdir(dst+ "/" + lower)
for file in os.listdir(folder + "/" + lower) :
print(file)
LungMask = cv2.imread(LungMaskPath + "/" + lower + "/" + file, 0)
_, LungMask = cv2.threshold(LungMask, 127, 255, cv2.THRESH_BINARY)
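# note: the 3-value unpacking below matches OpenCV 3.x; OpenCV 4.x returns
# only (contours, hierarchy) from cv2.findContours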
_, contours, _ = cv2.findContours(LungMask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
rects = []
for cnt in contours:
rects.append(cv2.boundingRect(cnt))
tcomx = 10
tcomy = 10
bcomx = 10
bcomy = 10
top_x, top_y, bottom_x, bottom_y = 0, 0 ,0, 0
rects.sort()
top_x = min([x for (x, y, w, h) in rects]) - tcomx #26
top_y = min([y for (x, y, w, h) in rects]) - tcomy #26
bottom_x = max([x+w for (x, y, w, h) in rects]) + bcomx #234
bottom_y = max([y+h for (x, y, w, h) in rects]) + bcomy #227
#print(top_x, top_y, bottom_x, bottom_y)
if(top_x <=0 ) : top_x = tcomx
if(top_y <=0 ) : top_y = tcomy
if(bottom_x >= 1024 ) : bottom_x = 1024 - tcomx
if(bottom_y >= 1024 ) : bottom_y = 1024 - tcomy
Img = cv2.imread(folder + "/" + lower + "/" + file, 0)
ImgCrop = Img[top_y*2:bottom_y*2, top_x*2:bottom_x*2]
ImgCrop = cv2.resize(ImgCrop, (1024,1024))
cv2.imwrite(dst+ "/" + lower + "/" + file, ImgCrop)
f = open(csvfile, 'a', encoding = "utf-8", newline='')
f_writer = csv.writer(f)
strline = []
strline.append(file)
strline.append(str(top_y*2))
strline.append(str(bottom_y*2))
strline.append(str(top_x*2))
strline.append(str(bottom_x*2))
f_writer.writerow(strline)
f.close()
####################################################################################################################
###################################################################################
## 4. Line Detection
# In[]
##################################################################################
###################################################################################
## 5. Mask recur
# In[]
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import cv2
import csv
import numpy as np
from skimage.morphology import skeletonize
Classlist = ["Aortic Knob", "Lt Lower CB", "Pulmonary Conus", "Rt Lower CB", "Rt Upper CB", "DAO" , "Carina" , "LAA"]
filePath = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_3_LineMask"
dstPath = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_4_LineMask_Recur"
csvfile = "D:/[Data]/[Cardiomegaly]/4_ChestPA_ToLabel/[]_2_ImgCropped/metadata.csv"
for clss in Classlist :
clsfilepath = filePath + "/" + clss
if(not os.path.isdir(dstPath+"/" + clss) ):
os.mkdir(dstPath+"/" + clss)
for file in os.listdir(clsfilepath):
mask = cv2.imread(clsfilepath + "/" + file, 0)
mask = np.asarray(mask)
f = open(csvfile, 'r', encoding = "utf-8", newline='')
f_reader = csv.reader(f)
xtop = yleft = xbottom = yright = 0
for row in f_reader:
if (row[0] == file):
xtop = int(row[1])
yleft = int(row[3])
xbottom = int(row[2])
yright = int(row[4])
break
f.close()
if xtop == 0 or yleft == 0 or xbottom == 0 or yright == 0:
continue
#print(file)
#print(xbottom, xtop, yright, yleft)
mask_ = cv2.resize(mask, (yright- yleft, xbottom - xtop))
mask_ = np.asarray(mask_)
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
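# Illustrative kernel-engineering example (kept in comments; the classes used
# here -- RBF, WhiteKernel -- are defined further down in this module):
#
#     kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=1e-3)
#     kernel.theta   # log-transformed free hyperparameters
#     kernel ** 2    # Exponentiation node wrapping the whole sum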
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
from inspect import signature
import warnings
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..base import clone
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, str) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, str) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
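# Illustrative Hyperparameter specs (constructed examples, not taken from the
# code below):
#
#     Hyperparameter("noise_level", "numeric", (1e-5, 1e5))                 # free scalar
#     Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=3)  # vector-valued
#     Hyperparameter("sigma_0", "numeric", "fixed")                         # excluded from tuning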
class Kernel(metaclass=ABCMeta):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
try:
value = getattr(self, arg)
except AttributeError:
warnings.warn('From version 0.24, get_params will raise an '
'AttributeError if a parameter cannot be '
'retrieved as an instance attribute. Previously '
'it would return None.',
FutureWarning)
value = None
params[arg] = value
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : array, shape (n_dims,)
The hyperparameters
"""
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = [getattr(self, attr) for attr in dir(self)
if attr.startswith("hyperparameter_")]
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = [hyperparameter.bounds
for hyperparameter in self.hyperparameters
if not hyperparameter.fixed]
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
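    # Example (added): plain numbers are promoted to ConstantKernel, so
    #   2.0 * WhiteKernel(1.0) + 3.0
    # builds Sum(Product(ConstantKernel(2.0), WhiteKernel(1.0)),
    #            ConstantKernel(3.0)), and k ** 2 wraps k in Exponentiation.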
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin:
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin:
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
Parameters
----------
kernels : list of Kernel objects
The other kernels
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        # CompoundKernel has no k1 attribute; use the first component's size
        # (all components are assumed to expose the same number of dims).
        k_dims = self.kernels[0].n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
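def _demo_compound_kernel():
    """Hedged sketch (added): shows that CompoundKernel stacks each
    component's Gram matrix along a third axis instead of combining them.
    Uses only kernels defined in this module."""
    X = np.arange(6, dtype=float).reshape(3, 2)
    ck = CompoundKernel([ConstantKernel(2.0), ConstantKernel(3.0)])
    K = ck(X)                       # shape (3, 3, 2): one slice per kernel
    assert K.shape == (3, 3, 2)
    assert np.allclose(K[..., 0], 2.0) and np.allclose(K[..., 1], 3.0)
    assert ck.diag(X).shape == (3, 2)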
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = [Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds, hyperparameter.n_elements)
for hyperparameter in self.k1.hyperparameters]
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
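# Example (added): Exponentiation is applied elementwise, so
#   (ConstantKernel(4.0) ** 0.5)(X) == 2.0 everywhere; the exponent itself is
# a fixed constructor argument, not a tunable hyperparameter (theta and
# bounds are delegated to the base kernel above).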
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.full((X.shape[0], Y.shape[0]), self.constant_value,
dtype=np.array(self.constant_value).dtype)
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, np.full((X.shape[0], X.shape[0], 1),
self.constant_value,
dtype=np.array(self.constant_value).dtype))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(X.shape[0], self.constant_value,
dtype=np.array(self.constant_value).dtype)
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise of the signal as independently and identically
normally-distributed. The parameter noise_level equals the variance of this
noise.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level (variance)
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
            K = self.noise_level * np.eye(X.shape[0])
            if eval_gradient:
                if not self.hyperparameter_noise_level.fixed:
                    return (K, self.noise_level
                            * np.eye(X.shape[0])[:, :, np.newaxis])
                else:
                    return K, np.empty((X.shape[0], X.shape[0], 0))
            else:
                return K
        else:
            return np.zeros((X.shape[0], Y.shape[0]))
from abc import abstractmethod
import os
import numpy as np
import time
import tensorflow as tf
from keras.datasets import mnist
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Reshape
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU, Dropout, ReLU
from keras.layers import BatchNormalization
from keras.optimizers import Adam, RMSprop
IMGDIR = './imgs'
PLOTIMGROW=5
PLOTIMGCOL=5
class ElapsedTimer(object):
def __init__(self):
self.start = time.time()
def elapsed(self, sec):
if sec < 60:
return "{} sec".format(sec)
elif sec < 3600:
return "{} min".format(sec / 60)
else:
return "{} hr".format(sec / 3600)
def elapsed_time(self):
print("Elapsed: {} ".format(self.elapsed(time.time() - self.start)))
class GanBase(object):
def __init__(self, row=28, col=28, chn=1):
self.img_row = row
self.img_col = col
self.img_chn = chn
self.latent_size = 100
self.hidden_size = 256
self.img_size = row * col * chn
self.D = self.build_discriminator() # discriminator
self.G = self.build_generator() # generator
self.DM = self.build_discriminator_model() # discriminator loss
self.AM = self.build_adversarial_model() # generator loss
@abstractmethod
def build_discriminator(self):
pass
@abstractmethod
def build_generator(self):
pass
@abstractmethod
def build_discriminator_model(self):
pass
@abstractmethod
def build_adversarial_model(self):
pass
class DcGan(GanBase):
def build_discriminator(self):
seq = Sequential()
depth = self.latent_size
dropout = 0.4
# In: 28 x 28 x 1, depth = 1
# Out: 14 x 14 x 1, depth=64
input_shape = (self.img_row, self.img_col, self.img_chn)
seq.add(Conv2D(depth*1, 5, strides=2, input_shape=input_shape, padding='same'))
seq.add(LeakyReLU(alpha=0.2))
seq.add(Dropout(dropout))
seq.add(Conv2D(depth*2, 5, strides=2, padding='same'))
seq.add(LeakyReLU(alpha=0.2))
seq.add(Dropout(dropout))
seq.add(Conv2D(depth*4, 5, strides=2, padding='same'))
seq.add(LeakyReLU(alpha=0.2))
seq.add(Dropout(dropout))
seq.add(Conv2D(depth*8, 5, strides=1, padding='same'))
seq.add(LeakyReLU(alpha=0.2))
seq.add(Dropout(dropout))
# Out: 1-dim probability
seq.add(Flatten())
seq.add(Dense(1))
seq.add(Activation('sigmoid'))
seq.summary()
return seq
def build_generator(self):
seq = Sequential()
dropout = 0.4
depth = self.latent_size * 4
dim = 7
# In: 100
# Out: dim x dim x depth
seq.add(Dense(dim*dim*depth, input_dim=self.latent_size))
seq.add(BatchNormalization(momentum=0.9))
seq.add(Activation('relu'))
seq.add(Reshape((dim, dim, depth)))
seq.add(Dropout(dropout))
# In: dim x dim x depth
# Out: 2*dim x 2*dim x depth/2
seq.add(UpSampling2D())
seq.add(Conv2DTranspose(int(depth/2), 5, padding='same'))
seq.add(BatchNormalization(momentum=0.9))
seq.add(Activation('relu'))
seq.add(UpSampling2D())
seq.add(Conv2DTranspose(int(depth/4), 5, padding='same'))
seq.add(BatchNormalization(momentum=0.9))
seq.add(Activation('relu'))
seq.add(Conv2DTranspose(int(depth/8), 5, padding='same'))
seq.add(BatchNormalization(momentum=0.9))
seq.add(Activation('relu'))
# Out: 28 x 28 x 1 grayscale image [0.0,1.0] per pix
seq.add(Conv2DTranspose(1, 5, padding='same'))
seq.add(Activation('sigmoid'))
seq.summary()
return seq
def build_discriminator_model(self):
optimizer = RMSprop(lr=0.0002, decay=6e-8)
seq = Sequential()
seq.add(self.D)
seq.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
return seq
def build_adversarial_model(self):
optimizer = RMSprop(lr=0.0001, decay=3e-8)
seq = Sequential()
seq.add(self.G)
seq.add(self.D)
seq.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
return seq
class BasicGan(GanBase):
def build_discriminator(self):
input_shape = (self.img_row, self.img_col, self.img_chn)
seq = Sequential()
seq.add(Flatten(input_shape=input_shape))
seq.add(Dense(self.hidden_size))
seq.add(LeakyReLU(alpha=0.2))
seq.add(Dense(self.hidden_size))
seq.add(LeakyReLU(alpha=0.2))
seq.add(Dense(1))
seq.add(Activation('sigmoid'))
#seq.summary()
return seq
def build_generator(self):
output_shape = (self.img_row, self.img_col, self.img_chn)
seq = Sequential()
seq.add(Dense(self.hidden_size, input_dim=self.latent_size))
seq.add(ReLU())
seq.add(Dense(self.hidden_size))
seq.add(ReLU())
seq.add(Dense(self.img_size))
seq.add(Activation('tanh'))
seq.add(Reshape(output_shape))
#seq.summary()
return seq
def build_discriminator_model(self):
seq = Sequential()
seq.add(self.D)
seq.compile(optimizer=Adam(lr=0.0002), loss='binary_crossentropy', metrics=['accuracy'])
seq.summary()
return seq
def build_adversarial_model(self):
self.D.trainable = False
seq = Sequential()
seq.add(self.G)
seq.add(self.D)
seq.compile(optimizer=Adam(lr=0.0002), loss='binary_crossentropy', metrics=['accuracy'])
seq.summary()
return seq
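# Note (added): the order above matters. DM is compiled while D is still
# trainable, and Keras freezes trainable flags at compile time, so DM updates
# the discriminator; AM sets D.trainable = False before compiling, so it
# trains only the generator through a frozen D. DcGan.build_adversarial_model
# omits this freeze, so its adversarial model would also update D.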
class MnistGan(object):
def __init__(self):
        self.dcgan = BasicGan()  # swap in DcGan() here for the conv variant
(self.x_train, _), (_, _) = mnist.load_data()
self.x_train = self.x_train / 127.5 - 1.
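        # Pixels are scaled to [-1, 1] to match BasicGan's tanh generator;
        # DcGan's sigmoid generator would instead expect inputs in [0, 1].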
self.x_train = np.expand_dims(self.x_train, axis=3)
def train(self, epoch_size=200, batch_size=128, save_interval=1):
real_labels = np.ones([batch_size, 1])
fake_labels = np.zeros([batch_size, 1])
comp_labels = np.concatenate((real_labels, fake_labels))
for i in range(epoch_size):
real_imgs = self.x_train[np.random.randint(0,
self.x_train.shape[0], size=batch_size), :, :, :]
fake_imgs = np.random.uniform(-1.0, 1.0, size=[batch_size, self.dcgan.latent_size])
fake_imgs = self.dcgan.G.predict(fake_imgs)
comp_imgs = np.concatenate((real_imgs, fake_imgs))
# ============================================================ #
# Train the discriminator #
# ============================================================ #
d_loss = self.dcgan.DM.train_on_batch(comp_imgs, comp_labels)
# ============================================================ #
# Train the generator #
# ============================================================ #
fake_imgs = np.random.uniform(-1.0, 1.0, size=[batch_size, self.dcgan.latent_size])
a_loss = self.dcgan.AM.train_on_batch(fake_imgs, real_labels)
msg = "%04d: [D loss: %f, acc: %f]" % (i, d_loss[0], d_loss[1])
msg = "%s [A loss: %f, acc: %f]" % (msg, a_loss[0], a_loss[1])
print(msg)
if i % save_interval == 0:
                fake_img = np.random.uniform(-1, 1, size=[PLOTIMGROW*PLOTIMGCOL, self.dcgan.latent_size])
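                # Hedged sketch (added): the source is cut off here; the
                # constants IMGDIR / PLOTIMGROW / PLOTIMGCOL suggest this
                # branch renders a grid of generator samples to disk. The
                # filename pattern below is assumed, not from the original.
                gen_imgs = self.dcgan.G.predict(fake_img)
                fig, axes = plt.subplots(PLOTIMGROW, PLOTIMGCOL)
                for r in range(PLOTIMGROW):
                    for c in range(PLOTIMGCOL):
                        axes[r, c].imshow(gen_imgs[r * PLOTIMGCOL + c, :, :, 0],
                                          cmap='gray')
                        axes[r, c].axis('off')
                os.makedirs(IMGDIR, exist_ok=True)
                fig.savefig(os.path.join(IMGDIR, 'mnist_%04d.png' % i))
                plt.close(fig)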
import numpy as np
from dataloaders.datasets.blending_masks import blending_masks
def test1():
    '''
    Two 3x3 tiles side by side, offsets (0,0) and (0,2): they overlap in a
    single shared column, where each mask drops to weight 0.5.
    '''
masks = blending_masks(
np.array([[ (0,0), (0,2) ]], dtype=np.int32), size=3)
np.testing.assert_almost_equal(
masks,
np.array([[
[
[1., 1., 0.5],
[1., 1., 0.5],
[1., 1., 0.5],
],
[
[0.5, 1., 1.],
[0.5, 1., 1.],
[0.5, 1., 1.],
]
]])
)
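# Note (added): the property these tests check is that, at every pixel, the
# overlapping tile weights sum to exactly 1.0, so blending per-tile outputs
# with these masks reconstructs a seamless full image (see the final test).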
def test2():
    '''
    Two 3x3 tiles stacked vertically, offsets (0,0) and (2,0): they overlap
    in a single shared row, where each mask drops to weight 0.5.
    '''
masks = blending_masks(np.array([[(0,0)], [(2,0)]], dtype=np.int32), size=3)
np.testing.assert_almost_equal(
masks,
np.array([
[[
[1., 1., 1.],
[1., 1., 1.],
[0.5, 0.5, 0.5],
]],
[[
[0.5, 0.5, 0.5],
[1., 1., 1.],
[1., 1., 1.],
]]
])
)
def test3():
    '''
    Three 3x3 tiles in a row, offsets (0,0), (0,2) and (0,4): each pair of
    neighbours shares one column, so the middle tile is halved on both edges.
    '''
    # One row of three tiles (assumed layout; it matches the expected mask
    # shape (1, 3, 3, 3) asserted below).
    masks = blending_masks(np.array(
        [[(0, 0), (0, 2), (0, 4)]], dtype=np.int32), size=3)
np.testing.assert_almost_equal(
masks,
np.array([[
[
[1., 1., 0.5],
[1., 1., 0.5],
[1., 1., 0.5],
],
[
[0.5, 1., 0.5],
[0.5, 1., 0.5],
[0.5, 1., 0.5],
],
[
[0.5, 1., 1.],
[0.5, 1., 1.],
[0.5, 1., 1.],
]
]])
)
def test4():
offsets = np.array([
[(0, 0), (0, 75)],
[(60, 0), (60, 75)],
[(120, 0), (120,75)]
], dtype=np.int32)
size = 100
masks = blending_masks(offsets, size=size)
width = np.max(offsets[:,:,0]) + size
height = np.max(offsets[:,:,1]) + size
pic = np.zeros((width, height), dtype=np.float32)
for x in range(offsets.shape[0]):
for y in range(offsets.shape[1]):
offx, offy = offsets[x,y]
pic[offx:offx+size,offy:offy+size] += masks[x,y]
np.testing.assert_almost_equal(np.min(pic), 1.)
    np.testing.assert_almost_equal(np.max(pic), 1.)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import pytest
from distutils.version import LooseVersion
import numpy as np
import pytz
import pandas as pd
from xarray import Variable, IndexVariable, Coordinate, Dataset
from xarray.core import indexing
from xarray.core.variable import as_variable, as_compatible_data
from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray
from xarray.core.pycompat import PY3, OrderedDict
from xarray.core.common import full_like, zeros_like, ones_like
from . import TestCase, source_ndarray, requires_dask
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.sizes, {'time': 10})
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def _assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
elif expected_dtype is not False:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT', 'ns')
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls('x', listarray)
assert x.data == listarray
assert x[0].data == listarray.squeeze()
assert x.squeeze().data == listarray.squeeze()
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_pandas_period_index(self):
v = self.cls(['x'], pd.period_range(start='2000', periods=20, freq='B'))
self.assertEqual(v[0], pd.Period('2000', freq='B'))
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# should we need `.to_base_variable()`?
# probably a break that `+v` changes type?
v = self.cls(['x'], x)
base_v = v.to_base_variable()
# unary ops
self.assertVariableIdentical(base_v, +v)
self.assertVariableIdentical(base_v, abs(v))
self.assertArrayEqual((-v).values, -x)
# binary ops with numbers
self.assertVariableIdentical(base_v, v + 0)
self.assertVariableIdentical(base_v, 0 + v)
self.assertVariableIdentical(base_v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(base_v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y).to_base_variable())
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, IndexVariable)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, IndexVariable)
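    # Note (added): as exercised above, binary ops drop attrs (see the
    # 'units' check) and unary ops on subclasses return plain Variable,
    # which is why comparisons go through to_base_variable().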
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
# think this is a break, that argsort changes the type
self.assertVariableIdentical(v.argsort(), v.to_base_variable())
self.assertVariableIdentical(v.clip(2, 3),
self.cls('x', x.clip(2, 3)).to_base_variable())
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable())
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range('2000-01-01', periods=3),
np.array(['a', 'b', 'c'], dtype=object)]:
yield (self.cls('x', data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
self.assertArrayEqual(v.values, np.asarray(data))
self.assertArrayEqual(np.asarray(v), np.asarray(data))
self.assertEqual(v[0].values, np.asarray(data)[0])
self.assertEqual(np.asarray(v[0]), np.asarray(data)[0])
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
self.assertTrue(v.equals(v2))
self.assertTrue(v.identical(v2))
self.assertTrue(v.no_conflicts(v2))
self.assertTrue(v[0].equals(v2[0]))
self.assertTrue(v[0].identical(v2[0]))
self.assertTrue(v[0].no_conflicts(v2[0]))
self.assertTrue(v[:2].equals(v2[:2]))
self.assertTrue(v[:2].identical(v2[:2]))
self.assertTrue(v[:2].no_conflicts(v2[:2]))
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable('x', 3 * [False])
for v, _ in self.example_1d_objects():
actual = 'z' == v
self.assertVariableIdentical(expected, actual)
actual = ~('z' != v)
self.assertVariableIdentical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls('x', range(3), {'foo': 1}, {'bar': 2})
for actual in [expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({'x': 3}),
expected.copy(deep=True),
expected.copy(deep=False)]:
self.assertVariableIdentical(expected.to_base_variable(),
actual.to_base_variable())
self.assertEqual(expected.encoding, actual.encoding)
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(['a'], x)
w = self.cls(['a'], y)
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat([v, w], 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'):
Variable.concat([v, Variable(['c'], y)], 'b')
# test indexers
actual = Variable.concat(
[v, w],
positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)],
dim='a')
expected = Variable('a', np.array([x, y]).ravel(order='F'))
self.assertVariableIdentical(expected, actual)
# test concatenating along a dimension
v = Variable(['time', 'x'], np.random.random((10, 8)))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
# test dimension order
self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
with self.assertRaisesRegexp(ValueError, 'all input arrays must have'):
Variable.concat([v[:, 0], v[:, 1:]], 'x')
def test_concat_attrs(self):
# different or conflicting attributes should be removed
v = self.cls('a', np.arange(5), {'foo': 'bar'})
w = self.cls('a', np.ones(5))
expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)])).to_base_variable()
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 2
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 'bar'
expected.attrs['foo'] = 'bar'
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ['S', 'U']:
x = self.cls('animal', np.array(['horse'], dtype=kind))
y = self.cls('animal', np.array(['aardvark'], dtype=kind))
actual = Variable.concat([x, y], 'animal')
expected = Variable(
'animal', np.array(['horse', 'aardvark'], dtype=kind))
self.assertVariableEqual(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls('x', ['0', '1', '2'])
b = self.cls('x', ['3', '4'])
actual = Variable.concat([a, b], dim='x')
expected = Variable('x', np.arange(5).astype(str).astype(object))
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.dtype, object)
self.assertEqual(type(expected.values[0]), str)
def test_copy(self):
v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'})
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIs(type(v), type(w))
self.assertVariableIdentical(v, w)
self.assertEqual(v.dtype, w.dtype)
if self.cls is Variable:
if deep:
self.assertIsNot(source_ndarray(v.values),
source_ndarray(w.values))
else:
self.assertIs(source_ndarray(v.values),
source_ndarray(w.values))
self.assertVariableIdentical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
names=('one', 'two', 'three'))
v = self.cls('x', midx)
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIsInstance(w._data, PandasIndexAdapter)
self.assertIsInstance(w.to_index(), pd.MultiIndex)
self.assertArrayEqual(v._data.array, w._data.array)
def test_real_and_imag(self):
v = self.cls('x', np.arange(3) - 1j * np.arange(3), {'foo': 'bar'})
expected_re = self.cls('x', np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.real, expected_re)
expected_im = self.cls('x', -np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.imag, expected_im)
expected_abs = self.cls('x', np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
self.assertVariableAllClose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls('x', [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
self.assertVariableAllClose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype='int64'))
v = self.cls('x', data)
print(v) # should not error
assert v.dtype == 'int64'
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(start='2000-01-01',
tz=pytz.timezone('America/New_York'),
periods=10, freq='1h')
v = self.cls('x', data)
print(v) # should not error
if 'America/New_York' in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == 'object'
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list('abc'), [0, 1]])
v = self.cls('x', idx)
self.assertVariableIdentical(Variable((), ('a', 0)), v[0])
self.assertVariableIdentical(v, v[:])
def test_load(self):
array = self.cls('x', np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
self.assertVariableIdentical(array, copied)
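# Note (added): VariableSubclassTestCases is mixed into the concrete
# TestCase classes below, so each test runs once per value of `cls`
# (Variable here, and presumably IndexVariable in a sibling class), which
# is why the tests construct data via self.cls(...).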
class TestVariable(TestCase, VariableSubclassTestCases):
cls = staticmethod(Variable)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(['time', 'x'], self.d)
self.assertArrayEqual(v.data, self.d)
self.assertArrayEqual(v.values, self.d)
self.assertIs(source_ndarray(v.values), self.d)
with self.assertRaises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
self.assertIs(source_ndarray(v.values), d2)
d3 = np.random.random((10, 3))
v.data = d3
self.assertIs(source_ndarray(v.data), d3)
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
self.assertEqual(v.item(), 0)
self.assertIs(type(v.item()), float)
v = IndexVariable('x', np.arange(5))
self.assertEqual(2, v.searchsorted(2))
def test_datetime64_conversion_scalar(self):
expected = np.datetime64('2000-01-01T00:00:00Z', 'ns')
for values in [
np.datetime64('2000-01-01T00Z'),
pd.Timestamp('2000-01-01T00'),
datetime(2000, 1, 1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
for values in [
np.timedelta64(1, 'D'),
pd.Timedelta('1 day'),
timedelta(days=1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
def test_0d_str(self):
v = Variable([], u'foo')
self.assertEqual(v.dtype, np.dtype('U3'))
self.assertEqual(v.values, 'foo')
v = Variable([], np.string_('foo'))
self.assertEqual(v.dtype, np.dtype('S3'))
self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo')
def test_0d_datetime(self):
v = Variable([], pd.Timestamp('2000-01-01'))
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
def test_0d_timedelta(self):
for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
v = Variable([], td)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns'))
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
v2 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
self.assertTrue(v1.equals(v2))
self.assertTrue(v1.identical(v2))
v3 = Variable(('dim1', 'dim3'), data=d)
self.assertFalse(v1.equals(v3))
v4 = Variable(('dim1', 'dim2'), data=d)
self.assertTrue(v1.equals(v4))
self.assertFalse(v1.identical(v4))
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
self.assertFalse(v1.equals(v5))
self.assertFalse(v1.equals(None))
self.assertFalse(v1.equals(d))
self.assertFalse(v1.identical(None))
self.assertFalse(v1.identical(d))
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(('x'), [np.nan, np.nan])
self.assertTrue(v1.broadcast_equals(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.identical(v2))
v3 = Variable(('x'), [np.nan])
self.assertTrue(v1.broadcast_equals(v3))
self.assertFalse(v1.equals(v3))
self.assertFalse(v1.identical(v3))
self.assertFalse(v1.broadcast_equals(None))
v4 = Variable(('x'), [np.nan] * 3)
self.assertFalse(v2.broadcast_equals(v4))
def test_no_conflicts(self):
v1 = Variable(('x'), [1, 2, np.nan, np.nan])
v2 = Variable(('x'), [np.nan, 2, 3, np.nan])
self.assertTrue(v1.no_conflicts(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.broadcast_equals(v2))
self.assertFalse(v1.identical(v2))
self.assertFalse(v1.no_conflicts(None))
v3 = Variable(('y'), [np.nan, 2, 3, np.nan])
self.assertFalse(v3.no_conflicts(v1))
d = np.array([1, 2, np.nan, np.nan])
self.assertFalse(v1.no_conflicts(d))
self.assertFalse(v2.no_conflicts(d))
v4 = Variable(('w', 'x'), [d])
self.assertTrue(v1.no_conflicts(v4))
def test_as_variable(self):
data = np.arange(10)
expected = Variable('x', data)
self.assertVariableIdentical(expected, as_variable(expected))
ds = Dataset({'x': expected})
self.assertVariableIdentical(expected, as_variable(ds['x']).to_base_variable())
self.assertNotIsInstance(ds['x'], Variable)
self.assertIsInstance(as_variable(ds['x']), Variable)
FakeVariable = namedtuple('FakeVariable', 'values dims')
fake_xarray = FakeVariable(expected.values, expected.dims)
self.assertVariableIdentical(expected, as_variable(fake_xarray))
xarray_tuple = (expected.dims, expected.values)
self.assertVariableIdentical(expected, as_variable(xarray_tuple))
with self.assertRaisesRegexp(TypeError, 'tuples to convert'):
as_variable(tuple(data))
with self.assertRaisesRegexp(
TypeError, 'without an explicit list of dimensions'):
as_variable(data)
actual = as_variable(data, name='x')
self.assertVariableIdentical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
self.assertVariableIdentical(expected, actual)
def test_repr(self):
v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
expected = dedent("""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
""").strip()
self.assertEqual(expected, repr(v))
def test_repr_lazy_data(self):
v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
self.assertIn('200000 values with dtype', repr(v))
self.assertIsInstance(v._data, LazilyIndexedArray)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(['x', 'y'], data)
# test slicing
self.assertVariableIdentical(v, v[:])
self.assertVariableIdentical(v, v[...])
self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
v[:3, :2])
# test array indexing
x = Variable(['x'], np.arange(10))
y = Variable(['y'], np.arange(11))
self.assertVariableIdentical(v, v[x.values])
self.assertVariableIdentical(v, v[x])
self.assertVariableIdentical(v[:3], v[x < 3])
self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
self.assertVariableIdentical(Variable(['y'], data[n]), item)
with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
iter(Variable([], 0))
# test setting
v.values[:] = 0
self.assertTrue(np.all(v.values == 0))
# test orthogonal setting
v[range(10), range(11)] = 1
self.assertArrayEqual(v.values, np.ones((10, 11)))
def test_isel(self):
v = Variable(['time', 'x'], self.d)
self.assertVariableIdentical(v.isel(time=slice(None)), v)
self.assertVariableIdentical(v.isel(time=0), v[0])
self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3])
self.assertVariableIdentical(v.isel(x=0), v[:, 0])
with self.assertRaisesRegexp(ValueError, 'do not exist'):
v.isel(not_a_dim=0)
def test_index_0d_numpy_string(self):
# regression test to verify our work around for indexing 0d strings
v = Variable([], np.string_('asdf'))
self.assertVariableIdentical(v[()], v)
v = Variable([], np.unicode_(u'asdf'))
self.assertVariableIdentical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(('x'), [u'tmax'])[0][()]
expected = Variable((), u'tmax')
self.assertVariableIdentical(actual, expected)
def test_shift(self):
v = Variable('x', [1, 2, 3, 4, 5])
self.assertVariableIdentical(v, v.shift(x=0))
self.assertIsNot(v, v.shift(x=0))
expected = Variable('x', [np.nan, 1, 2, 3, 4])
self.assertVariableIdentical(expected, v.shift(x=1))
expected = Variable('x', [np.nan, np.nan, 1, 2, 3])
self.assertVariableIdentical(expected, v.shift(x=2))
expected = Variable('x', [2, 3, 4, 5, np.nan])
self.assertVariableIdentical(expected, v.shift(x=-1))
expected = Variable('x', [np.nan] * 5)
self.assertVariableIdentical(expected, v.shift(x=5))
self.assertVariableIdentical(expected, v.shift(x=6))
with self.assertRaisesRegexp(ValueError, 'dimension'):
v.shift(z=0)
v = Variable('x', [1, 2, 3, 4, 5], {'foo': 'bar'})
self.assertVariableIdentical(v, v.shift(x=0))
expected = Variable('x', [np.nan, 1, 2, 3, 4], {'foo': 'bar'})
self.assertVariableIdentical(expected, v.shift(x=1))
def test_shift2d(self):
v = Variable(('x', 'y'), [[1, 2], [3, 4]])
expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]])
self.assertVariableIdentical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable('x', [1, 2, 3, 4, 5])
self.assertVariableIdentical(v, v.roll(x=0))
self.assertIsNot(v, v.roll(x=0))
expected = Variable('x', [5, 1, 2, 3, 4])
self.assertVariableIdentical(expected, v.roll(x=1))
self.assertVariableIdentical(expected, v.roll(x=-4))
self.assertVariableIdentical(expected, v.roll(x=6))
expected = Variable('x', [4, 5, 1, 2, 3])
self.assertVariableIdentical(expected, v.roll(x=2))
self.assertVariableIdentical(expected, v.roll(x=-3))
with self.assertRaisesRegexp(ValueError, 'dimension'):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(('x', 'y'), np.random.randn(5, 6))
for axis, dim in [(0, 'x'), (1, 'y')]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
self.assertArrayEqual(expected, actual)
def test_transpose(self):
v = Variable(['time', 'x'], self.d)
v2 = Variable(['x', 'time'], self.d.T)
self.assertVariableIdentical(v, v2.transpose())
self.assertVariableIdentical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(['a', 'b', 'c', 'd'], x)
w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
self.assertEqual(w2.shape, (5, 3, 4, 2))
self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
def test_transpose_0d(self):
for value in [
3.5,
('a', 1),
np.datetime64('2000-01-01'),
np.timedelta64(1, 'h'),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(['x', 'y'], [[1]])
self.assertVariableIdentical(Variable([], 1), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))
v = Variable(['x', 'y'], [[1, 2]])
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
v.squeeze('y')
def test_get_axis_num(self):
v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4))
self.assertEqual(v.get_axis_num('x'), 0)
self.assertEqual(v.get_axis_num(['x']), (0,))
self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1))
self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0))
with self.assertRaisesRegexp(ValueError, 'not found in array dim'):
v.get_axis_num('foobar')
def test_set_dims(self):
v = Variable(['x'], [0, 1])
actual = v.set_dims(['x', 'y'])
expected = Variable(['x', 'y'], [[0], [1]])
self.assertVariableIdentical(actual, expected)
actual = v.set_dims(['y', 'x'])
self.assertVariableIdentical(actual, expected.T)
actual = v.set_dims(OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
self.assertVariableIdentical(actual, expected)
v = Variable(['foo'], [0, 1])
actual = v.set_dims('foo')
expected = v
self.assertVariableIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'must be a superset'):
v.set_dims(['z'])
def test_set_dims_object_dtype(self):
v = Variable([], ('a', 1))
actual = v.set_dims(('x',), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ('a', 1)
expected = Variable(['x'], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
actual = v.stack(z=('x', 'y'))
expected = Variable('z', [0, 1, 2, 3], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.stack(z=('x',))
expected = Variable(('y', 'z'), v.data.T, v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.stack(z=(),)
self.assertVariableIdentical(actual, v)
actual = v.stack(X=('x',), Y=('y',)).transpose('X', 'Y')
expected = Variable(('X', 'Y'), v.data, v.attrs)
self.assertVariableIdentical(actual, expected)
def test_stack_errors(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
v.stack(z=('x1',))
with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
v.stack(x=('x',))
def test_unstack(self):
v = Variable('z', [0, 1, 2, 3], {'foo': 'bar'})
actual = v.unstack(z=OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(('x', 'y'), [[0, 1], [2, 3]], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(z=OrderedDict([('x', 4), ('y', 1)]))
expected = Variable(('x', 'y'), [[0], [1], [2], [3]], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(z=OrderedDict([('x', 4)]))
expected = Variable('x', [0, 1, 2, 3], v.attrs)
self.assertVariableIdentical(actual, expected)
def test_unstack_errors(self):
v = Variable('z', [0, 1, 2, 3])
with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
v.unstack(foo={'x': 4})
with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
v.stack(z=('z',))
with self.assertRaisesRegexp(ValueError, 'the product of the new dim'):
v.unstack(z={'x': 5})
def test_unstack_2d(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]])
actual = v.unstack(y={'z': 2})
expected = Variable(['x', 'z'], v.data)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(x={'z': 2})
expected = Variable(['y', 'z'], v.data.T)
self.assertVariableIdentical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]])
actual = (v.stack(z=('x', 'y'))
.unstack(z=OrderedDict([('x', 2), ('y', 2)])))
self.assertVariableIdentical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(['a', 'b'], x)
# 1d to 2d broadcasting
self.assertVariableIdentical(
v * v,
Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x)))
self.assertVariableIdentical(
v * v[0],
Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0])))
self.assertVariableIdentical(
v[0] * v,
            Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x)))
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
import matplotlib.pyplot as plt
import random
from mpl_toolkits.mplot3d import Axes3D
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
devox_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_devoxelize.so'))
sys.path.append('../voxelization')
from tf_vox import group_voxel, voxelize, avg_voxel
sys.path.append('../../utils')
from tf_devox import trilinear_devoxelize
import tf_util
from plyfile import PlyData, PlyElement
sys.path.append(os.path.join(BASE_DIR, '..', 'tf_ops/pc_distance'))
import tf_nn_distance
tf.enable_eager_execution()
def get_cd_loss(pred, pc):
""" pred: BxNx3,
label: BxN, """
dists_forward,_,dists_backward,_ = tf_nn_distance.nn_distance(pred, pc)
# loss = tf.reduce_mean(dists_forward+dists_backward)
loss = (tf.reduce_mean(tf.sqrt(dists_forward)) + tf.reduce_mean(tf.sqrt(dists_backward)))/2
return loss
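# Hedged sketch: a pure-NumPy version of the symmetric Chamfer distance that
# get_cd_loss computes through the custom tf_nn_distance op. For illustration
# only; it assumes small, unbatched (N, 3) clouds rather than the op's BxNx3.
def _chamfer_distance_numpy(pred, pc):
    import numpy as np
    d = np.linalg.norm(pred[:, None, :] - pc[None, :, :], axis=-1)  # (N, M) pairwise distances
    return (d.min(axis=1).mean() + d.min(axis=0).mean()) / 2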
def write_ply(tensor, name):
np.savetxt(name, np.squeeze(tensor.numpy().transpose(0, 2, 1)))
    num_points = tensor.numpy().shape[2]  # avoid shadowing the built-in len
file = os.path.join('.', name)
f = open(file, "r+")
    lines = [line.strip().replace('  ', ' ') for line in f]
vertex_nums, face_nums, _ = lines[1].split()
f = open(file, "w+")
f.seek(0)
head = "ply\nformat ascii 1.0\ncomment VCGLIB generated\nelement vertex " + '2048' + "\nproperty float x\nproperty float y\nproperty float z\nelement face " + str(0) + "\nproperty list uchar int vertex_indices\nend_header\n"
f.write(head)
for line in lines[:]:
f.write(line + "\n")
f.close()
if __name__ == '__main__':
import numpy as np
import time
for file in os.listdir('../../data'):
file = os.path.join('../../data',file)
plydata = PlyData.read(file)
a_data = []
for i in range(plydata['vertex'].count):
line = [plydata['vertex']['x'][i], plydata['vertex']['y'][i], plydata['vertex']['z'][i]]
a_data.append(line)
        pc = np.array(a_data)
import numpy as np
import statistics
import matplotlib.pyplot as plt
import seaborn as sns
np_normal_dis = np.random.normal(5, 0.5, 100)
print(np_normal_dis.min())
print(np_normal_dis.max())
print(np_normal_dis.mean())
# print(np.median(np_normal_dis))  # ndarray has no .median() method; use np.median instead
print(np_normal_dis.std())
two_dimension_array = np.array([(1,2,3), [4,5,6]])
print(two_dimension_array)
print('Max of each column:', np.amax(two_dimension_array, axis=0))
print('Max of each row:', np.amax(two_dimension_array, axis=1))
print('Min of each column:', np.amin(two_dimension_array, axis=0))
print('Min of each row:', np.amin(two_dimension_array, axis=1))
a = [1, 2, 3]
print('Tile:', np.tile(a, 2))      # repeat the whole list
print('Repeat:', np.repeat(a, 2))  # repeat each element in place
print(np.random.random()) # between 0 - 1
r = np.random.random(size=[2, 3]) # 2 row 3 column
print(r)
print(np.random.choice(['a', 'e', 'i', 'o', 'u'], size=10))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
self.recordings = []
self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, sequence of
    0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
    (list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
A line is either control or experiments; Control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
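# Hedged example of the listing syntax described above; the file contents and
# recording names are made up purely for illustration.
def _demo_load_dose_recordings(tmpdir='/tmp'):
    listing = "C\tJ1_010101n1\nE\tJ2_010101n1\t0.25\nE\tJ3_010101n1\t0.5\n"
    with open(os.path.join(tmpdir, 'rec_list.txt'), 'w') as fh:
        fh.write(listing)
    # returns (['J1_010101n1'], {'0.25': ['J2_010101n1'], '0.5': ['J3_010101n1']})
    return load_dose_recordings(tmpdir, 'rec_list.txt')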
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
    if a mouse was stimulated for 2 min every 20 min at 20 Hz, return the
    start and end index of each 2 min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
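# Toy example: two 2-sample pulses separated by more than intval seconds
# at SR = 1 Hz.
def _demo_laser_start_end():
    laser = np.array([0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0])
    return laser_start_end(laser, SR=1, intval=5)  # -> (array([1, 9]), array([2, 10]))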
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
    inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
    The conversion is executed only if no conversion factor
    is already specified in the info file.
    :param ppath: base folder
    :param rec: recording
    :param conv_factor: conversion factor
    :return: None
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
iv - minimum time inverval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
    The nice thing about this filter is that it has zero-phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
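# Usage sketch for my_lpfilter: per the docstring, a 20 Hz cutoff on a
# 1000 Hz signal corresponds to w0 = 2 * 20 / 1000 = 0.04 (cutoff and
# sampling rate are assumed here purely for illustration).
def _demo_lpfilter():
    t = np.arange(0, 1, 1 / 1000.0)
    x = np.sin(2 * np.pi * 5 * t) + np.sin(2 * np.pi * 100 * t)  # 5 Hz + 100 Hz mix
    return my_lpfilter(x, w0=2 * 20 / 1000.0)  # the 100 Hz component is attenuated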
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
    downsample the vector x by replacing nbin consecutive
    bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
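# Worked example: with nbin=3, an 8-sample vector is first truncated to
# 6 samples, then bins [0,1,2] and [3,4,5] are averaged.
def _demo_downsample_vec():
    return downsample_vec(np.arange(8.0), 3)  # -> array([1., 4.])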
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using hanning window to estimate
the power spectrum
The function returns power density with units V**2/Hz
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of hanning window, even integer!
@Return:
power density, frequencies
The function returns power density in units V^2 / Hz
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hanning', nperseg=int(length), noverlap=int(length/2))
return pxx, f
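# Sanity check of the identity quoted in the docstring:
# np.var(data) ~ np.sum(power density) * (frequencies[1] - frequencies[0]).
def _demo_power_spectrum_variance():
    data = np.random.randn(10000)
    pxx, f = power_spectrum(data, length=256, dt=1.0 / 100)
    return np.var(data), np.sum(pxx) * (f[1] - f[0])  # the two values are close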
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
    then the first window goes from 0 s to 5 s, the second window from 2.5 to 7.5 s, ...
The last window ends at ceil(len(data)/length)*5s
    Another example: assume we have 13 s of data with 5 s windows; then the power density is calculated for the following
time windows:
0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
In total there are thus 2*ceil(13/5)-1 = 5 windows
The last window starts at 2*3-2 * (5/2) = 10 s
Note: the returned time axis starts at time point goes from 0 to 10s in 2.5s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
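# Worked example of the window arithmetic in the docstring: 13 samples at
# dt = 1 s with 5 s windows give k = ceil(13/5) = 3, hence 2*k - 1 = 5
# half-overlapping spectra with time stamps 0, 2.5, 5, 7.5 and 10 s
# (nfft=4 chosen here only because the docstring asks for an even integer).
def _demo_spectral_density_windows():
    data = np.random.randn(13)
    Pow, f, t = spectral_density(data, length=5, nfft=4, dt=1.0)
    return Pow.shape[1], t  # 5 spectra; t = array([0., 2.5, 5., 7.5, 10.])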
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
    Normalize EEG spectrogram by dividing each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:pplot: if True, plot spectrogram along with power band
:sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
    SE, SM, f - EEG spectrogram, EMG spectrogram, frequency axis
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
# the current state is an estimate of the current sample and the previous state
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
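# The temporal smoothing above is a first-order recursive (exponential moving
# average) low-pass: y[i] = alpha * x[i] + (1 - alpha) * y[i-1]. A scalar
# sketch of the same recursion, for clarity only:
def _demo_recursive_lowpass(x, alpha=0.3):
    y = np.zeros_like(x, dtype=float)
    for i in range(1, len(x)):
        y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]
    return y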
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as,
theta/delta is above the soft threshold (and EMG power stays low) REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
    std_thdelta the hard theta/delta threshold is given by mean(theta/delta) + $std_thdelta * std(theta/delta)
    sdt time bin for brain state, typically 2.5 s
    psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
    thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
'''
Authors: <NAME>, <NAME>, <NAME>
Email ID: <EMAIL>, <EMAIL>, <EMAIL>
'''
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.models import Model
#from tensorflow.keras import layers
#from tensorflow.keras import optimizers
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Activation
from sklearn.neighbors import KernelDensity
from keras.layers import Masking
from keras.layers import Input
from keras.layers import Concatenate
from keras import optimizers
from scipy.stats import spearmanr
from scipy import stats
from statistics import mean
import copy
import mlflow
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from numpy import genfromtxt
from sklearn.utils import shuffle
import csv
import random
import math
import sklearn
import mlflow
import mlflow.keras
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot as plt
import os
import glob
import multiprocessing as mp
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import matplotlib.cm
import matplotlib
import argparse
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
from sklearn.neighbors import RadiusNeighborsRegressor
from xgboost import XGBRFRegressor
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
numLatency = 118
embeddingsFile = "onnxEmbeddings.csv"
lat = []
maxVal = 0
matplotlib.use('Agg')
def parse_latency(file):
global lat
data = np.genfromtxt(file, delimiter=',')
latency = np.mean(data, axis=1)
latency = latency[:numLatency]
lat.append(latency)
#latency = latency/np.amax(latency)
return latency
def parse_features():
Features = []
maxLayer = 0
maxFlops = 0
maxChannel = 0
maxDim = 224
maxKernel = 7
maxPadding = 3
with open(embeddingsFile, newline='') as f:
reader = csv.reader(f)
data = list(reader)
for i in range(len(data)):
temp = [data[i][j * 13:(j + 1) * 13] for j in range((len(data[i]) + 12) // 13 )]
maxLayer = max(maxLayer, len(temp))
for j in range(len(temp)):
maxFlops=max(maxFlops, float(temp[j][12]))
maxChannel = max(maxChannel, int(temp[j][7]))
maxChannel = max(maxChannel, int(temp[j][8]))
Features.append(temp)
numpyFeatures = np.ones((len(Features), maxLayer, 13))
numpyFeatures = numpyFeatures*-1
for i in range(len(Features)):
temp = Features[i]
for j in range(len(temp)):
for k in range(len(temp[j])):
numpyFeatures[i][j][k] = temp[j][k]
if k == 5 or k == 6:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxDim
elif k == 7 or k == 8:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxChannel
elif k == 9:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxKernel
elif k == 12:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxFlops
return numpyFeatures, maxLayer
def learn_xgb_model(hardware, maxLayer, lat_mean, features, featuresShape, splitPercentage=0.99, shuffleFeatures=True):
numSample = len(lat_mean)
features = features[:numSample]
if shuffleFeatures == True:
features, lat_mean = shuffle(features,lat_mean)
trainf = features[:int(splitPercentage*len(features))]
trainy = lat_mean[:int(splitPercentage*len(features))]
testf = features[int(splitPercentage*len(features)):]
testy = lat_mean[int(splitPercentage*len(features)):]
print("================= Dataset Stage ==============")
print(trainf.shape, trainy.shape, testf.shape, testy.shape)
trainf = np.reshape(trainf, (trainf.shape[0], trainf.shape[1]*trainf.shape[2]))
testf = np.reshape(testf, (testf.shape[0], testf.shape[1]*testf.shape[2]))
model = XGBRegressor()
model.fit(trainf, trainy)
trainPredict = model.predict(trainf)
testPredict = model.predict(testf)
trainScore = math.sqrt(mean_squared_error(trainy, trainPredict))
testScore = math.sqrt(mean_squared_error(testy, testPredict))
### Train Model characteristics
r2_score = sklearn.metrics.r2_score(trainy, trainPredict)
s_coefficient, pvalue = spearmanr(trainy, trainPredict)
writeToFile('Train Score: %f RMSE' % (trainScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(trainy, trainPredict)
plt.savefig(args.name+'/plots/'+hardware+'_'+args.learning_type+'_'+str(splitPercentage)+'_train.png')
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
writeToFile('Test Score: %f RMSE' % (testScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, testPredict)
plt.savefig(args.name+'/plots/'+hardware+"_"+args.learning_type+'_'+str(1-splitPercentage)+'_test.png')
return model
def learn_xgb_model_collab(hardware, maxLayer, lat_mean, features, featuresShape, splitPercentage=0.99, shuffleFeatures=True):
    print('Learning ' + hardware)
numSample = len(lat_mean)
features = features[:numSample]
if shuffleFeatures == True:
features, lat_mean = shuffle(features,lat_mean)
testf = features
testy = lat_mean
testf = np.reshape(testf, (testf.shape[0], testf.shape[1]*testf.shape[2]))
results = []
index = []
for i in range(10, numSample):
trainf = features[:i]
trainy = lat_mean[:i]
# print("================= Dataset Stage ==============")
# print(trainf.shape, trainy.shape, testf.shape, testy.shape)
trainf = np.reshape(trainf, (trainf.shape[0], trainf.shape[1]*trainf.shape[2]))
model = XGBRegressor()
model.fit(trainf, trainy)
testPredict = model.predict(testf)
testScore = math.sqrt(mean_squared_error(testy, testPredict))
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
results.append(r2_score)
index.append(i)
matplotlib.rcParams['figure.dpi'] = 500
plt.figure()
plt.xlabel("Number of Datapoints")
plt.ylabel("Average R^2")
sns.lineplot(index, results)
plt.savefig(args.name+'/plots/'+hardware+'_indiLearn.png')
    f = open(args.name+'/meta/plotdata.txt', 'a')
s1 = ','.join(map(str, index))
s2 = ','.join(map(str, results))
f.write(hardware+'\n'+s1+'\n'+s2+'\n')
f.close()
def learn_lstm_model(hardware, maxLayer, lat_mean, features, featuresShape):
numSample = len(lat_mean)
features = features[:numSample]
features, lat_mean = shuffle(features,lat_mean)
trainf = features[:int(0.99*len(features))]
trainy = lat_mean[:int(0.99*len(features))]
#testf = features[:int(1.0*len(features))]
#testy = lat_mean[:int(1.0*len(features))]
testf = features[int(0.99*len(features)):]
testy = lat_mean[int(0.99*len(features)):]
print("================= Dataset Stage ==============")
print(trainf.shape, trainy.shape, testf.shape, testy.shape)
#mlflow.keras.autolog()
#Create an LSTM model
model=Sequential()
model.add(Masking(mask_value=-1,input_shape=(maxLayer, featuresShape)))
model.add(LSTM(20, activation='relu'))
model.add(Dense(1, name = 'fc'))
opt = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
#initial_learning_rate = 0.01
# lr_schedule = optimizers.schedules.ExponentialDecay(initial_learning_rate,
#opt = optimizers.SGD(learning_rate = initial_learning_rate)
model.compile(loss='mean_squared_error', optimizer=opt, metrics=[keras.metrics.MeanAbsolutePercentageError()])
model.summary()
#filepath="checkpoint-{loss:.5f}-{val_loss:.5f}-{val_mean_absolute_percentage_error}.hdf5"
filepath=args.name+'/models/model.hdf5'
    #checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # monitor can be val_loss or loss
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')  # monitor can be val_loss or loss
es = EarlyStopping(monitor='loss', mode='min', verbose=1, patience=50)
val = model.fit(trainf, trainy, epochs=250, batch_size=512, verbose=1, callbacks=[es, checkpoint])
#val = model.fit(trainf, trainy, epochs=250, batch_size=512, verbose=1, callbacks=[es, checkpoint], validation_data=(testf, testy))
model.load_weights(filepath)
trainPredict = model.predict(trainf)
testPredict = model.predict(testf)
trainScore = math.sqrt(mean_squared_error(trainy, trainPredict))
testScore = math.sqrt(mean_squared_error(testy, testPredict))
### Train Model characteristics
r2_score = sklearn.metrics.r2_score(trainy, trainPredict)
s_coefficient, pvalue = spearmanr(trainy, trainPredict)
writeToFile('Train Score: %f RMSE' % (trainScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(trainy, trainPredict[:,0])
#plt.title(hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+"_"+args.learning_type+'_train.png')
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
writeToFile('Test Score: %f RMSE' % (testScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, testPredict[:,0])
#plt.title(hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+"_"+args.learning_type+'_test.png')
### Adding Other Regressors
extractor = Model(outputs=model.get_layer('fc').input, inputs=model.input)
extractor.summary()
knn = KNeighborsRegressor()
trainPredict = extractor.predict(trainf)
testPredict = extractor.predict(testf)
randForest = RandomForestRegressor()
decisionTree = DecisionTreeRegressor()
svr = SVR()
    kernel_ridge = KernelRidge()
xgb = XGBRegressor()
xgbrf = XGBRFRegressor()
    modellist = [ ('knn', knn), ('randomForest', randForest), ('dTree', decisionTree), ('svr', svr), ('kernelridge', kernel_ridge), ('xgb', xgb), ('xgbrf', xgbrf) ]
for name, model_lowB in modellist:
model_lowB.fit(trainPredict, trainy)
modeltestPred = model_lowB.predict(testPredict)
testScore = math.sqrt(mean_squared_error(testy, modeltestPred))
r2_score = sklearn.metrics.r2_score(testy, modeltestPred)
s_coefficient, pvalue = spearmanr(testy, modeltestPred)
writeToFile('Test Score with %s : %f RMSE' % (name, testScore))
writeToFile("The R^2 Value with %s for %s: %f"%(hardware, name, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s with %s : %f and %f"%(hardware, name, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, modeltestPred)
#plt.title(name + hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+args.learning_type+'_'+name+'.png')
return (model, modellist, extractor)
'''
This function takes in the dictionary of hardware_names to its maxLayer, latency and features map
net_dict[key][2] - refers to the network features for a hardware and
net_dict[key][1] - refers to the latency for that hardware
1. First determine the mean and std of the latencies for each hardware in the dictionary
2. Sample from the distribution - i.e. from Mu-8*sigma to Mu+2*sigma, at each parts of the distribution, find all indices that intersect in all the hardwares considered here. For ex., if network no. 2374 falls between mu-1*sigma and mu for all the hardware devices in the dictionary, then add 2374 to the representation set for all the hardware
3. Find maxSamples such networks that become the golden representation of the hardware
4. Return the list of lists of maxSamples network representation for all hardwares and also the indices of the representation networks
5. The indices will be used by any hardware not on the list to make and append it's representation
TODO: Not using max samples for now - change
'''
def sample_hwrepresentation(net_dict, maxSamples):
mean_lat = []
sd_lat = []
final_indices = []
#Determining the Mean and Standard Deviation of Latencies
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency,:,:] #Not required actually.. Simply doing
net_dict[key][1] = net_dict[key][1][:numLatency]
print(np.mean(net_dict[key][1]), np.std(net_dict[key][1]))
mean_lat.append(np.mean(net_dict[key][1]))
sd_lat.append(np.std(net_dict[key][1]))
for i in range(-2,8): #This range might not be enough -- the range should be more generic when hardware increases
index_where = []
index = 0
for key in net_dict:
index_where.append(np.where(np.logical_and(net_dict[key][1] > mean_lat[index]+i*sd_lat[index], net_dict[key][1] <= mean_lat[index]+(i+1)*sd_lat[index])))
index += 1
for j in range(len(index_where)):
index_where[0] = np.intersect1d(index_where[0], index_where[j])
final_intersection = index_where[0]
if len(final_intersection) >= 4:
loop_index = 4
else:
loop_index = len(final_intersection)
hw_features_cncat = []
for j in range(loop_index):
final_indices.append(final_intersection[j])
print("The final indices size is %f"%(len(final_indices)))
for key in net_dict:
hw_features_per_device = []
for j in range(len(final_indices)):
hw_features_per_device.append(net_dict[key][1][final_indices[j]])
net_dict[key][1] = np.delete(net_dict[key][1], final_indices, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], final_indices, axis=0)
hw_features_cncat.append(hw_features_per_device)
print(len(final_indices), net_dict[key][2].shape)
return final_indices, hw_features_cncat
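# Hedged usage sketch: net_dict maps a hardware name to [maxLayer, latency
# vector, feature tensor]. The device names and shapes below are made up
# purely for illustration.
def _demo_sample_hwrepresentation():
    rng = np.random.RandomState(0)
    net_dict = {'cpu': [30, rng.rand(numLatency), rng.rand(numLatency, 30, 13)],
                'gpu': [30, rng.rand(numLatency), rng.rand(numLatency, 30, 13)]}
    final_indices, hw_features = sample_hwrepresentation(net_dict, maxSamples=16)
    return len(final_indices), len(hw_features)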
def random_indices(maxSamples):
rand_indices = []
for i in range(maxSamples):
rand_indices.append(random.randint(0,numLatency-1))
return rand_indices
'''
Function which computes total MACs of each network and samples maxSamples indices from it based on FLOPS.
'''
def flopsBasedIndices(maxSamples):
with open('../DiverseRandNetworkGenerator/Embeddings.csv') as f:
reader = csv.reader(f)
data = list(reader)
totalFLOPSList = np.zeros(len(data))
for i in range(len(data)):
temp = [data[i][j * 13:(j + 1) * 13] for j in range((len(data[i]) + 12) // 13 )]
for j in range(len(temp)):
totalFLOPSList[i]+=int(temp[j][12])
mean = np.mean(totalFLOPSList)
sd = np.std(totalFLOPSList)
def random_sampling(net_dict, rand_indices, maxSamples):
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency,:,:]
net_dict[key][1] = net_dict[key][1][:numLatency]
hw_features_cncat = []
#rand_indices = []
#final_indices = []
#for i in range(maxSamples):
# rand_indices.append(random.randint(0,5000))
for key in net_dict:
hw_features_per_device = []
for j in range(maxSamples):
hw_features_per_device.append(net_dict[key][1][rand_indices[j]])
hw_features_cncat.append(hw_features_per_device)
#If this is not done separately, the code will break
for key in net_dict:
        net_dict[key][1] = np.delete(net_dict[key][1], rand_indices, axis=0)
        net_dict[key][2] = np.delete(net_dict[key][2], rand_indices, axis=0)  # assumed completion, mirroring sample_hwrepresentation
    return hw_features_cncat
import numpy as np
class ProblemTest:
'''
Parametes object. Effectively represents each problem that we want to submit to the solver
'''
def __init__(self):
self.N = 100
self.M = 1000
self.K_pol = 3
self.K_cus = 0
self.U = 200
self.T = 1
self.u_min, self.u_max = -10, 10
self.initial_condition = 0
self.sigma = 2
self.transition_function_deterministic = lambda n, x, u: x + u * self.dt
self.transition_function_stochastic = lambda n, x, u: np.sqrt(self.dt) * self.sigma * np.random.randn(self.M)
self.terminal_condition_fnc = lambda x: x ** 2
self.running_reward = lambda x, u: x ** 2 + u ** 2
self.transition_function = lambda n, x, u: self.transition_function_deterministic(n, x, u) + \
self.transition_function_stochastic(n, x, u)
self.generate_training_points = lambda x: np.random.randn(self.M)
self.dt = self.T / self.N
self.K = self.K_pol + self.K_cus
        self.custom_basis = np.array([])
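# Hedged usage sketch (assumed driver code, not part of any solver API):
# advance the M sample paths one Euler--Maruyama step with a constant control.
def _demo_problem_step():
    p = ProblemTest()
    x = np.full(p.M, float(p.initial_condition))
    u = 0.0
    return p.transition_function(0, x, u)  # drift x + u*dt plus sqrt(dt)*sigma*noise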
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
import argparse
import matplotlib.pyplot as plt
import numpy as np
from datasetapi.voc_dataset.voc_data_processing import generate_wh_xyminmax_list
from datasetapi.coco_dataset.coco_data_processing import get_coco_wh_xyminmax, generate_calibset
import random
random.seed(2021)
# ids_or_names = ["abn1",
# "abn2",
# "abn3",
# "abn4",
# "abn5",
# "abn6",
# "abn7",
# "abn8",
# "abn9",
# "abn10"]
def plot_wh(wh_list, calib_wh_list, ids_or_names=None, plot_idx=None):
"""
:param wh_list: w & h list of training set
:param calib_wh_list: w & h list of calibration set
:param ids_or_names: category or id list
:param plot_idx: which category or id will be plot
:return:
"""
r = lambda: random.randint(0,255)
colors=[]
for i in range(len(ids_or_names) + 1):
color = ('#%02X%02X%02X' % (r(),r(),r()))
colors.append(color)
cls_list = []
plot_w_list = []
plot_h_list = []
calib_cls_list = []
calib_plot_w_list = []
calib_plot_h_list = []
for i in range(len(ids_or_names)):
cls_list.append([])
plot_w_list.append([])
plot_h_list.append([])
calib_cls_list.append([])
calib_plot_w_list.append([])
calib_plot_h_list.append([])
for wh in wh_list:
cls_list[ids_or_names.index(wh[0])].append(wh[0]) # cls_name_or_id = wh[0]
plot_w_list[ids_or_names.index(wh[0])].append(wh[1]) # w = wh[1]
plot_h_list[ids_or_names.index(wh[0])].append(wh[2]) # h = wh[2]
plot_list = [cls_list, plot_w_list, plot_h_list]
for calib_wh in calib_wh_list:
# calib_cls_name = calib_wh[0]
calib_cls_list[ids_or_names.index(calib_wh[0])].append(calib_wh[0])
calib_plot_w_list[ids_or_names.index(calib_wh[0])].append(calib_wh[1]) # w
calib_plot_h_list[ids_or_names.index(calib_wh[0])].append(calib_wh[2]) # h
calib_plot_list = [calib_cls_list, calib_plot_w_list, calib_plot_h_list]
area = np.pi * 2**2
if plot_idx is None:
for i in range(len(plot_list[0])):
cls = plot_list[0][i][0]
x = plot_list[1][i] # w
y = plot_list[2][i] # h
plt.scatter(x, y, s=area, c=colors[ids_or_names.index(cls)], alpha=0.1, label=cls)
calib_cls = calib_plot_list[0][i][0]
calib_x = calib_plot_list[1][i] # w
calib_y = calib_plot_list[2][i] # h
plt.scatter(calib_x, calib_y, s=area, marker='x', c=colors[ids_or_names.index(calib_cls)], alpha=0.5)
else:
cls = plot_list[0][plot_idx][0]
x = plot_list[1][plot_idx] # w
y = plot_list[2][plot_idx] # h
calib_cls = calib_plot_list[0][plot_idx][0]
calib_x = calib_plot_list[1][plot_idx] # w
calib_y = calib_plot_list[2][plot_idx] # h
if type(cls) is str:
plt.scatter(x, y, s=area, c=colors[ids_or_names.index(cls)], alpha=0.1, label='trainset_cls:'+cls)
plt.scatter(calib_x, calib_y, marker='x', c=colors[ids_or_names.index(calib_cls) + 1], alpha=0.3, label='calib_cls:'+calib_cls)
elif type(cls) is int:
plt.scatter(x, y, s=area, c=colors[ids_or_names.index(cls)], alpha=0.1, label='trainset_cls:'+str(cls))
plt.scatter(calib_x, calib_y, s=area, marker='x', c=colors[ids_or_names.index(calib_cls) + 1], alpha=0.3, label='calib_cls:'+str(calib_cls))
line_color = colors[ids_or_names.index(calib_cls) + 1]
        # reconstructed continuation (assumed): draw a horizontal guide at the calibration minimum
        plt.plot([np.min(calib_x), np.max(calib_x)], [np.min(calib_y), np.min(calib_y)], c=line_color)
import numpy as np
from scipy import interpolate
from sklearn.model_selection import KFold
def evaluate(distances, labels, nrof_folds=10):
thresholds = np.arange(0, 4, 0.01)
tpr, fpr, accuracy, best_thresholds = calculate_roc(thresholds, distances,
labels, nrof_folds=nrof_folds)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(thresholds, distances,
labels, 1e-3, nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far, best_thresholds
def calculate_roc(thresholds, distances, labels, nrof_folds=10):
nrof_pairs = min(len(labels), len(distances))
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, distances[train_set], labels[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, distances[test_set], labels[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], distances[test_set], labels[test_set])
tpr = np.mean(tprs,0)
fpr = np.mean(fprs,0)
return tpr, fpr, accuracy, thresholds[best_threshold_index]
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
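# Toy example: with threshold 1.0 the two closest pairs are predicted "same",
# giving tp = fp = tn = fn = 1, i.e. tpr = fpr = acc = 0.5.
def _demo_calculate_accuracy():
    dist = np.array([0.4, 0.8, 1.6, 2.0])
    actual = np.array([True, False, True, False])
    return calculate_accuracy(1.0, dist, actual)  # -> (0.5, 0.5, 0.5)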
def calculate_val(thresholds, distances, labels, far_target=1e-3, nrof_folds=10):
nrof_pairs = min(len(labels), len(distances))
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, distances[train_set], labels[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, distances[test_set], labels[test_set])
    val_mean = np.mean(val)
    val_std = np.std(val)
    far_mean = np.mean(far)
    return val_mean, val_std, far_mean
#!/usr/bin/env python
# Copyright 2021
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import ttest_ind
import netCDF4 as nc
import pickle
import os
from PIL import Image as PIL_Image
import sys
import shutil
import glob
import datetime
import time
import calendar
from numpy import genfromtxt
from scipy.optimize import curve_fit
from scipy.cluster.vq import kmeans,vq
from scipy.interpolate import interpn, interp1d
from math import e as e_constant
import math
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.collections import LineCollection
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib
import warnings
warnings.filterwarnings("ignore")
plt.style.use('classic')
# font size
# font_size = 14
# matplotlib.rc('font', **{'family': 'serif', 'serif': ['Arial'], 'size': font_size})
# matplotlib.rc('font', weight='bold')
p_progress_writing = False
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
time_format = '%d-%m-%Y_%H:%M'
time_format_khan = '%Y%m%d.0%H'
time_format_mod = '%Y-%m-%d_%H:%M:%S'
time_format_twolines = '%H:%M\n%d-%m-%Y'
time_format_twolines_noYear_noMin_intMonth = '%H\n%d-%m'
time_format_twolines_noYear = '%H:%M\n%d-%b'
time_format_twolines_noYear_noMin = '%H\n%d-%b'
time_format_date = '%Y-%m-%d'
time_format_time = '%H:%M:%S'
time_format_parsivel = '%Y%m%d%H%M'
time_format_parsivel_seconds = '%Y%m%d%H%M%S'
time_str_formats = [
time_format,
time_format_mod,
time_format_twolines,
time_format_twolines_noYear,
time_format_date,
time_format_time,
time_format_parsivel
]
default_cm = cm.jet
cm_vir = cm.viridis
listed_cm_colors_list = ['silver', 'red', 'green', 'yellow', 'blue', 'black']
listed_cm = ListedColormap(listed_cm_colors_list, 'indexed')
colorbar_tick_labels_list_cloud_phase = ['Clear', 'Water', 'SLW', 'Mixed', 'Ice', 'Unknown']
listed_cm_colors_list_cloud_phase = ['white', 'red', 'green', 'yellow', 'blue', 'purple']
listed_cm_cloud_phase = ListedColormap(listed_cm_colors_list_cloud_phase, 'indexed')
avogadros_ = 6.022140857E+23 # molecules/mol
gas_const = 83144.598 # cm3 mbar k-1 mol-1
gas_const_2 = 8.3144621 # J mol-1 K-1
gas_const_water = 461 # J kg-1 K-1
gas_const_dry = 287 # J kg-1 K-1
boltzmann_ = gas_const / avogadros_ # cm3 mbar / k molecules
gravity_ = 9.80665 # m/s
poisson_ = 2/7 # for dry air (k)
latent_heat_v = 2.501E+6 # J/kg
latent_heat_f = 3.337E+5 # J/kg
latent_heat_s = 2.834E+6 # J/kg
heat_capacity__Cp = 1005.7 # J kg-1 K-1 dry air
heat_capacity__Cv = 719 # J kg-1 K-1 water vapor
Rs_da = 287.05 # Specific gas const for dry air, J kg^{-1} K^{-1}
Rs_v = 461.51 # Specific gas const for water vapour, J kg^{-1} K^{-1}
Cp_da = 1004.6 # Specific heat at constant pressure for dry air
Cv_da = 719. # Specific heat at constant volume for dry air
Cp_v = 1870. # Specific heat at constant pressure for water vapour
Cv_v = 1410. # Specific heat at constant volume for water vapour
Cp_lw = 4218 # Specific heat at constant pressure for liquid water
Epsilon = 0.622 # Epsilon=Rs_da/Rs_v; The ratio of the gas constants
degCtoK = 273.15 # Temperature offset between K and C (deg C)
rho_w = 1000. # Liquid Water density kg m^{-3}
grav = 9.80665 # Gravity, m s^{-2}
Lv = 2.5e6 # Latent Heat of vaporisation, J kg-1
boltzmann = 5.67e-8 # Stefan-Boltzmann constant, W m-2 K-4 (despite the name, not Boltzmann's constant)
mv = 18.0153e-3 # Mean molar mass of water vapor(kg/mol)
m_a = 28.9644e-3 # Mean molar mass of air(kg/mol)
Rstar_a = 8.31432 # Universal gas constant for air (N m /(mol K))
path_output = '/g/data/k10/la6753/'
# Misc
class Object_create(object):
pass
def list_files_recursive(path_, filter_str=None):
# create list of raw spectra files
file_list = []
# r=root, d=directories, f = files
if filter_str is None:
for r, d, f in os.walk(path_):
for file in f:
file_list.append(os.path.join(r, file))
else:
for r, d, f in os.walk(path_):
for file in f:
if filter_str in file:
file_list.append(os.path.join(r, file))
return file_list
def list_files(path_, filter_str='*'):
file_list = sorted(glob.glob(str(path_ + filter_str)))
return file_list
def coincidence(arr_1,arr_2):
# only coincidences
check_ = arr_1 * arr_2
check_[check_ == check_] = 1
arr_1_checked = arr_1 * check_
arr_2_checked = arr_2 * check_
return arr_1_checked[~np.isnan(arr_1_checked)], arr_2_checked[~np.isnan(arr_2_checked)]
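# Example (illustrative values): rows where either array is NaN are dropped from both.
# >>> a_ = np.array([1., np.nan, 3., 4.])
# >>> b_ = np.array([10., 20., np.nan, 40.])
# >>> coincidence(a_, b_)
# (array([1., 4.]), array([10., 40.]))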
def array_2d_fill_gaps_by_interpolation_linear(array_):
rows_ = array_.shape[0]
cols_ = array_.shape[1]
output_array_X = np.zeros((rows_, cols_), dtype=float)
output_array_Y = np.zeros((rows_, cols_), dtype=float)
row_sum = np.sum(array_, axis=1)
col_index = np.arange(array_.shape[1])
col_sum = np.sum(array_, axis=0)
row_index = np.arange(array_.shape[0])
for r_ in range(array_.shape[0]):
if row_sum[r_] != row_sum[r_]:
# get X direction interpolation
coin_out = coincidence(col_index, array_[r_, :])
output_array_X[r_, :][np.isnan(array_[r_, :])] = np.interp(
col_index[np.isnan(array_[r_, :])], coin_out[0], coin_out[1])
for c_ in range(array_.shape[1]):
if col_sum[c_] != col_sum[c_]:
# get Y direction interpolation
coin_out = coincidence(row_index, array_[:, c_])
output_array_Y[:, c_][np.isnan(array_[:, c_])] = np.interp(
row_index[np.isnan(array_[:, c_])], coin_out[0], coin_out[1])
output_array = np.array(array_)
output_array[np.isnan(array_)] = 0
return output_array + ((output_array_X + output_array_Y)/2)
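# Example (illustrative): a single NaN is replaced with the mean of the row-wise
# and column-wise linear interpolations, here (6 + 5) / 2 = 5.5.
# >>> arr_ = np.array([[1., 2., 3.],
# ...                  [4., np.nan, 8.],
# ...                  [7., 8., 9.]])
# >>> array_2d_fill_gaps_by_interpolation_linear(arr_)[1, 1]
# 5.5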
def array_2d_fill_gaps_by_interpolation_cubic(array_):
rows_ = array_.shape[0]
cols_ = array_.shape[1]
output_array_X = np.zeros((rows_, cols_), dtype=float)
output_array_Y = np.zeros((rows_, cols_), dtype=float)
row_sum = np.sum(array_, axis=1)
col_index = np.arange(array_.shape[1])
col_sum = np.sum(array_, axis=0)
row_index = np.arange(array_.shape[0])
for r_ in range(array_.shape[0]):
if row_sum[r_] != row_sum[r_]:
# get X direction interpolation
coin_out = coincidence(col_index, array_[r_, :])
interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
output_array_X[r_, :][np.isnan(array_[r_, :])] = interp_function(col_index[np.isnan(array_[r_, :])])
for c_ in range(array_.shape[1]):
if col_sum[c_] != col_sum[c_]:
# get Y direction interpolation
coin_out = coincidence(row_index, array_[:, c_])
interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
output_array_Y[:, c_][np.isnan(array_[:, c_])] = interp_function(row_index[np.isnan(array_[:, c_])])
output_array = np.array(array_)
output_array[np.isnan(array_)] = 0
return output_array + ((output_array_X + output_array_Y)/2)
def combine_2_time_series(time_1_reference, data_1, time_2, data_2,
forced_time_step=None, forced_start_time=None, forced_stop_time=None,
cumulative_var_1=False, cumulative_var_2=False):
"""
takes two data sets with respective time series, and outputs the coincident stamps from both data sets
It does this by using mean_discrete() for both sets with the same start stamp and averaging time, the averaging time
is the forced_time_step
:param time_1_reference: 1D array, same units as time_2, this series will define the returned time step reference
:param data_1: can be 1D or 2D array, first dimention most be same as time_1
:param time_2: 1D array, same units as time_1
:param data_2: can be 1D or 2D array, first dimention most be same as time_2
:param window_: optional, if 0 (default) the values at time_1 and time_2 most match exactly, else, the match can
be +- window_
:param forced_time_step: if not none, the median of the differential of the time_1_reference will be used
:param forced_start_time: if not none, the returned series will start at this time stamp
:param forced_stop_time: if not none, the returned series will stop at this time stamp
:param cumulative_var_1: True is you want the variable to be accumulated instead of means, only of 1D data
:param cumulative_var_2: True is you want the variable to be accumulated instead of means, only of 1D data
:return: Index_averaged_1: 1D array, smallest coincident time, without time stamp gaps
:return: Values_mean_1: same shape as data_1 both according to Index_averaged_1 times
:return: Values_mean_2: same shape as data_2 both according to Index_averaged_1 times
"""
# define forced_time_step
if forced_time_step is None:
forced_time_step = np.median(np.diff(time_1_reference))
# find time period
if forced_start_time is None:
first_time_stamp = max(np.nanmin(time_1_reference), np.nanmin(time_2))
else:
first_time_stamp = forced_start_time
if forced_stop_time is None:
last_time_stamp = min(np.nanmax(time_1_reference), np.nanmax(time_2))
else:
last_time_stamp = forced_stop_time
# do the averaging
print('starting averaging of data 1')
if cumulative_var_1:
Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
first_time_stamp, last_index=last_time_stamp,
cumulative_parameter_indx=0)
else:
Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
first_time_stamp, last_index=last_time_stamp)
print('starting averaging of data 2')
if cumulative_var_2:
Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
first_time_stamp, last_index=last_time_stamp,
cumulative_parameter_indx=0)
else:
Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
first_time_stamp, last_index=last_time_stamp)
# check that averaged indexes are the same
if np.nansum(np.abs(Index_averaged_1 - Index_averaged_2)) != 0:
        print('error during averaging of series, times do not match')
return None, None, None
# return the combined, trimmed data
return Index_averaged_1, Values_mean_1, Values_mean_2
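# Usage sketch (hedged): relies on mean_discrete(), defined elsewhere in this module;
# the variable names and the 60-second step below are assumptions for illustration.
# >>> t_avg, v1_avg, v2_avg = combine_2_time_series(time_1, data_1, time_2, data_2,
# ...                                               forced_time_step=60)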
def split_str_chunks(s, n):
"""Produce `n`-character chunks from `s`."""
out_list = []
for start in range(0, len(s), n):
out_list.append(s[start:start+n])
return out_list
def coincidence_multi(array_list):
    # keep only rows where every input array has a valid (non-NaN) value
    parameters_list = array_list
    check_ = parameters_list[0]
    for param_ in parameters_list[1:]:
        check_ = check_ * param_  # a NaN in any array propagates to check_
    check_[check_ == check_] = 1  # normalize valid rows to 1
    # delete rows where any array had a NaN
    list_list = []
    for param_ in parameters_list:
        t_list = []
        for i in range(check_.shape[0]):
            if check_[i] == check_[i]:
                t_list.append(param_[i])
        list_list.append(t_list)
    # convert the filtered lists back to arrays
    ar_list = []
    for ii in range(len(parameters_list)):
        ar_list.append(np.array(list_list[ii]))
    return ar_list
def coincidence_zero(arr_1,arr_2):
# only coincidences
check_ = arr_1 * arr_2
# delete empty rows_
list_1 = []
list_2 = []
for i in range(check_.shape[0]):
if check_[i] != 0:
list_1.append(arr_1[i])
list_2.append(arr_2[i])
return np.array(list_1),np.array(list_2)
def discriminate(X_, Y_, Z_, value_disc_list, discrmnt_invert_bin = False):
if discrmnt_invert_bin:
Z_mask = np.ones(Z_.shape[0])
Z_mask[Z_ > value_disc_list[0]] = np.nan
Z_mask[Z_ >= value_disc_list[1]] = 1
Y_new = Y_ * Z_mask
X_new = X_ * Z_mask
else:
Z_mask = np.ones(Z_.shape[0])
Z_mask[Z_ < value_disc_list[0]] = np.nan
Z_mask[Z_ > value_disc_list[1]] = np.nan
Y_new = Y_ * Z_mask
X_new = X_ * Z_mask
return X_new, Y_new
def add_ratio_to_values(header_, values_, nominator_index, denominator_index, ratio_name, normalization_value=1.):
nominator_data = values_[:,nominator_index]
denominator_data = values_[:,denominator_index]
ratio_ = normalization_value * nominator_data / denominator_data
values_new = np.column_stack((values_,ratio_))
header_new = np.append(header_,ratio_name)
return header_new, values_new
def bin_data(x_val_org,y_val_org, start_bin_edge=0, bin_size=1, min_bin_population=1):
# get coincidences only
x_val,y_val = coincidence(x_val_org,y_val_org)
# combine x and y in matrix
M_ = np.column_stack((x_val,y_val))
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(x_val.shape[0]-1):
if x_val[x]==x_val[x] and x_val[x+1]==x_val[x+1]:
if x_val[x+1] < x_val[x]:
always_ascending = 0
if always_ascending == 0:
M_sorted = M_[M_[:,0].argsort()] # sort by first column
M_ = M_sorted
# convert data to list of bins
y_binned = []
x_binned = []
last_row = 0
last_row_temp = last_row
while start_bin_edge <= np.nanmax(x_val):
y_val_list = []
for row_ in range(last_row, M_.shape[0]):
if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size:
if M_[row_, 1] == M_[row_, 1]:
y_val_list.append(M_[row_, 1])
last_row_temp = row_
if M_[row_, 0] >= start_bin_edge + bin_size:
last_row_temp = row_
break
x_binned.append(start_bin_edge)
if len(y_val_list) >= min_bin_population:
y_binned.append(y_val_list)
else:
y_binned.append([])
start_bin_edge += bin_size
last_row = last_row_temp
# add series
if bin_size >= 1:
x_binned_int = np.array(x_binned, dtype=int)
else:
x_binned_int = x_binned
return x_binned_int, y_binned
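# Example (illustrative): 1-wide bins starting at 0; y values are grouped per bin.
# >>> x_ = np.array([0.2, 0.7, 1.1, 1.8, 2.5])
# >>> y_ = np.array([1., 2., 3., 4., 5.])
# >>> bin_data(x_, y_, start_bin_edge=0, bin_size=1)
# (array([0, 1, 2]), [[1.0, 2.0], [3.0, 4.0], [5.0]])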
def shiftedColorMap(cmap, midpoint=0.5, name='shiftedcmap'):
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(0, 1, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
    try:
        matplotlib.colormaps.register(newcmap, force=True)
    except AttributeError:
        plt.register_cmap(cmap=newcmap)  # fallback for matplotlib < 3.5
return newcmap
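# Usage sketch (hedged): move the colormap midpoint to 30 % of the range, e.g. for
# diverging data whose zero is not centered; names below are illustrative only.
# >>> shifted_cm = shiftedColorMap(cm.jet, midpoint=0.3, name='jet_shifted')
# >>> plt.pcolormesh(np.random.rand(10, 10) - 0.3, cmap=shifted_cm)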
def student_t_test(arr_1, arr_2):
return ttest_ind(arr_1, arr_2, nan_policy='omit')
def k_means_clusters(array_, cluster_number, forced_centers=None):
if forced_centers is None:
centers_, x = kmeans(array_,cluster_number)
data_id, x = vq(array_, centers_)
return centers_, data_id
else:
data_id, x = vq(array_, forced_centers)
return forced_centers, data_id
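# Example (illustrative): two well-separated groups of 2D points; note that scipy's
# kmeans expects float input and the cluster numbering may come out swapped.
# >>> pts_ = np.array([[0., 0.], [0.1, 0.], [5., 5.], [5.1, 5.]])
# >>> centers_, labels_ = k_means_clusters(pts_, 2)
# >>> labels_
# array([0, 0, 1, 1])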
def grid_(x, y, z, resX=100, resY=100):
    """Convert 3 column data to matplotlib grid"""
    xi = np.linspace(min(x), max(x), resX)
    yi = np.linspace(min(y), max(y), resY)
    X, Y = np.meshgrid(xi, yi)
    # matplotlib.mlab.griddata was removed in matplotlib 3.1; scipy's griddata
    # (linear interpolation) is used instead
    Z = griddata((np.asarray(x), np.asarray(y)), np.asarray(z), (X, Y), method='linear')
    return X, Y, Z
def find_max_index_2d_array(array_):
return np.unravel_index(np.argmax(array_, axis=None), array_.shape)
def find_min_index_2d_array(array_):
return np.unravel_index(np.argmin(array_, axis=None), array_.shape)
def find_max_index_1d_array(array_):
return np.argmax(array_, axis=None)
def find_min_index_1d_array(array_):
return np.argmin(array_, axis=None)
def time_series_interpolate_discrete(Index_, Values_, index_step, first_index,
position_=0., last_index=None):
"""
this will average values from Values_ that are between Index_[n:n+avr_size)
:param Index_: n by 1 numpy array to look for position,
:param Values_: n by m numpy array, values to be averaged
:param index_step: in same units as Index_
:param first_index: is the first discrete index on new arrays.
:param position_: will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
:param last_index: in case you want to force the returned series to some fixed period/length
:return: Index_averaged, Values_averaged
"""
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
MM_ = np.column_stack((Index_,Values_))
MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
Index_ = MM_sorted[:,0]
Values_ = MM_sorted[:,1:]
# error checking!
if Index_.shape[0] != Values_.shape[0]:
print('error during shape check! Index_.shape[0] != Values_.shape[0]')
return None, None
if Index_[-1] < first_index:
print('error during shape check! Index_[-1] < first_index')
return None, None
# initialize output matrices
if last_index is None:
final_index = np.nanmax(Index_)
else:
final_index = last_index
total_averaged_rows = int((final_index-first_index) / index_step) + 1
    # note: np.interp below only supports 1D Values_
    Index_interp = np.zeros(total_averaged_rows)
for r_ in range(total_averaged_rows):
Index_interp[r_] = first_index + (r_ * index_step)
Index_interp -= (position_ * index_step)
Values_interp = np.interp(Index_interp, Index_, Values_)
Index_interp = Index_interp + (position_ * index_step)
return Index_interp, Values_interp
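# Example (illustrative): resample a 1D series onto a regular 5-unit step.
# >>> t_ = np.array([0., 10., 20.])
# >>> v_ = np.array([0., 100., 200.])
# >>> time_series_interpolate_discrete(t_, v_, index_step=5, first_index=0)
# (array([ 0.,  5., 10., 15., 20.]), array([  0.,  50., 100., 150., 200.]))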
def array_2D_sort_ascending_by_column(array_, column_=0):
array_sorted = array_[array_[:, column_].argsort()]
return array_sorted
def get_ax_range(ax):
x_1 = ax.axis()[0]
x_2 = ax.axis()[1]
y_1 = ax.axis()[2]
y_2 = ax.axis()[3]
return x_1, x_2, y_1, y_2
def get_array_perimeter_only(array_):
return np.concatenate([array_[0, :-1], array_[:-1, -1], array_[-1, ::-1], array_[-2:0:-1, 0]])
# WRF
def wrf_var_search(wrf_nc_file, description_str):
description_str_lower = description_str.lower()
for var_ in sorted(wrf_nc_file.variables):
try:
if description_str_lower in wrf_nc_file.variables[var_].description.lower():
print(var_, '|', wrf_nc_file.variables[var_].description)
except:
pass
def create_virtual_sonde_from_wrf(sonde_dict, filelist_wrf_output,
wrf_filename_time_format = 'wrfout_d03_%Y-%m-%d_%H_%M_%S'):
# create time array
filelist_wrf_output_noPath = []
for filename_ in filelist_wrf_output:
filelist_wrf_output_noPath.append(filename_.split('/')[-1])
wrf_time_file_list = np.array(time_str_to_seconds(filelist_wrf_output_noPath, wrf_filename_time_format))
# create lat and lon arrays
wrf_domain_file = nc.Dataset(filelist_wrf_output[0])
# p(sorted(wrf_domain_file.variables))
# wrf_vars = sorted(wrf_domain_file.variables)
# for i_ in range(len(wrf_vars)):
# try:
# print(wrf_vars[i_], '\t\t', wrf_domain_file.variables[wrf_vars[i_]].description)
# except:
# print(wrf_vars[i_])
wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
wrf_lat_U = wrf_domain_file.variables['XLAT_U'][0, :, :].filled(np.nan)
wrf_lon_U = wrf_domain_file.variables['XLONG_U'][0, :, :].filled(np.nan)
wrf_lat_V = wrf_domain_file.variables['XLAT_V'][0, :, :].filled(np.nan)
wrf_lon_V = wrf_domain_file.variables['XLONG_V'][0, :, :].filled(np.nan)
wrf_domain_file.close()
# load sonde's profile
sonde_hght = sonde_dict['hght'] # m ASL
sonde_pres = sonde_dict['pres'] # hPa
sonde_time = sonde_dict['time'] # seconds since epoc
sonde_lati = sonde_dict['lati'] # degrees
sonde_long = sonde_dict['long'] # degrees
# create output lists of virtual sonde
list_p__ = []
list_hgh = []
list_th_ = []
list_th0 = []
list_qv_ = []
list_U__ = []
list_V__ = []
list_tim = []
list_lat = []
list_lon = []
wrf_point_abs_address_old = 0
# loop thru real sonde's points
for t_ in range(sonde_hght.shape[0]):
p_progress_bar(t_, sonde_hght.shape[0])
point_hght = sonde_hght[t_]
point_pres = sonde_pres[t_]
point_time = sonde_time[t_]
point_lati = sonde_lati[t_]
point_long = sonde_long[t_]
# find closest cell via lat, lon
index_tuple = find_index_from_lat_lon_2D_arrays(wrf_lat,wrf_lon, point_lati,point_long)
index_tuple_U = find_index_from_lat_lon_2D_arrays(wrf_lat_U,wrf_lon_U, point_lati,point_long)
index_tuple_V = find_index_from_lat_lon_2D_arrays(wrf_lat_V,wrf_lon_V, point_lati,point_long)
# find closest file via time
file_index = time_to_row_sec(wrf_time_file_list, point_time)
# open wrf file
wrf_domain_file = nc.Dataset(filelist_wrf_output[file_index])
# get pressure array from wrf
wrf_press = (wrf_domain_file.variables['PB'][0, :, index_tuple[0], index_tuple[1]].data +
wrf_domain_file.variables['P'][0, :, index_tuple[0], index_tuple[1]].data) / 100 # hPa
# find closest model layer via pressure
layer_index = find_min_index_1d_array(np.abs(wrf_press - point_pres))
# define point absolute address and check if it is a new point
wrf_point_abs_address_new = (index_tuple[0], index_tuple[1], file_index, layer_index)
if wrf_point_abs_address_new != wrf_point_abs_address_old:
wrf_point_abs_address_old = wrf_point_abs_address_new
# get wrf data
index_tuple_full = (0, layer_index, index_tuple[0], index_tuple[1])
index_tuple_full_U = (0, layer_index, index_tuple_U[0], index_tuple_U[1])
index_tuple_full_V = (0, layer_index, index_tuple_V[0], index_tuple_V[1])
# save to arrays
list_p__.append(float(wrf_press[layer_index]))
list_hgh.append(float(point_hght))
list_th_.append(float(wrf_domain_file.variables['T'][index_tuple_full]))
list_th0.append(float(wrf_domain_file.variables['T00'][0]))
list_qv_.append(float(wrf_domain_file.variables['QVAPOR'][index_tuple_full]))
list_U__.append(float(wrf_domain_file.variables['U'][index_tuple_full_U]))
list_V__.append(float(wrf_domain_file.variables['V'][index_tuple_full_V]))
list_tim.append(float(wrf_time_file_list[file_index]))
list_lat.append(float(wrf_lat[index_tuple[0], index_tuple[1]]))
list_lon.append(float(wrf_lon[index_tuple[0], index_tuple[1]]))
wrf_domain_file.close()
# convert lists to arrays
array_p__ = np.array(list_p__)
array_hgh = np.array(list_hgh)
array_th_ = np.array(list_th_)
array_th0 = np.array(list_th0)
array_qv_ = np.array(list_qv_)
array_U__ = np.array(list_U__)
array_V__ = np.array(list_V__)
array_tim = np.array(list_tim)
array_lat = np.array(list_lat)
array_lon = np.array(list_lon)
# calculate derivative variables
wrf_temp_K = calculate_temperature_from_potential_temperature(array_th_ + array_th0, array_p__)
wrf_temp_C = kelvin_to_celsius(wrf_temp_K)
wrf_e = MixR2VaporPress(array_qv_, array_p__*100)
wrf_td_C = DewPoint(wrf_e)
wrf_td_C[wrf_td_C > wrf_temp_C] = wrf_temp_C[wrf_td_C > wrf_temp_C]
wrf_RH = calculate_RH_from_QV_T_P(array_qv_, wrf_temp_K, array_p__*100)
wrf_WD, wrf_WS = cart_to_polar(array_V__, array_U__)
wrf_WD_met = wrf_WD + 180
wrf_WD_met[wrf_WD_met >= 360] = wrf_WD_met[wrf_WD_met >= 360] - 360
wrf_WS_knots = ws_ms_to_knots(wrf_WS)
# create virtual sonde dict
wrf_sonde_dict = {}
wrf_sonde_dict['hght'] = array_hgh
wrf_sonde_dict['pres'] = array_p__
wrf_sonde_dict['temp'] = wrf_temp_C
wrf_sonde_dict['dwpt'] = wrf_td_C
wrf_sonde_dict['sknt'] = wrf_WS_knots
wrf_sonde_dict['drct'] = wrf_WD_met
wrf_sonde_dict['relh'] = wrf_RH
wrf_sonde_dict['time'] = array_tim
wrf_sonde_dict['lati'] = array_lat
wrf_sonde_dict['long'] = array_lon
return wrf_sonde_dict
def wrf_get_temp_K(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_press = (wrf_domain_file.variables['PB'][0, :, :, :].data +
wrf_domain_file.variables['P'][0, :, :, :].data) / 100 # hPa
wrf_theta = (wrf_domain_file.variables['T'][0, :, :, :].data +
wrf_domain_file.variables['T00'][0].data) # K
wrf_temp_K = calculate_temperature_from_potential_temperature(wrf_theta, wrf_press)
if original_arg_type_str:
wrf_domain_file.close()
return wrf_temp_K
def wrf_get_press_hPa(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_press = (wrf_domain_file.variables['PB'][0, :, :, :].data +
wrf_domain_file.variables['P'][0, :, :, :].data) / 100 # hPa
if original_arg_type_str:
wrf_domain_file.close()
return wrf_press
def wrf_get_height_m(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_height = (wrf_domain_file.variables['PH'][0,:-1,:,:].data +
wrf_domain_file.variables['PHB'][0,:-1,:,:].data) / gravity_
if original_arg_type_str:
wrf_domain_file.close()
return wrf_height
def wrf_get_terrain_height_m(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_height = (wrf_domain_file.variables['PH'][0,0,:,:].data +
wrf_domain_file.variables['PHB'][0,0,:,:].data) / gravity_
if original_arg_type_str:
wrf_domain_file.close()
return wrf_height
def wrf_get_water_vapor_mixing_ratio(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_QVAPOR = wrf_domain_file.variables['QVAPOR'][0,:,:,:].data
if original_arg_type_str:
wrf_domain_file.close()
return wrf_QVAPOR
def wrf_get_cloud_water_mixing_ratio(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_QCLOUD = wrf_domain_file.variables['QCLOUD'][0,:,:,:].data
if original_arg_type_str:
wrf_domain_file.close()
return wrf_QCLOUD
def wrf_get_ice_mixing_ratio(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_QICE = wrf_domain_file.variables['QICE'][0,:,:,:].data
if original_arg_type_str:
wrf_domain_file.close()
return wrf_QICE
def wrf_get_lat_lon(wrf_nc):
original_arg_type_str = False
if type(wrf_nc) == str:
original_arg_type_str = True
wrf_domain_file = nc.Dataset(wrf_nc)
else:
wrf_domain_file = wrf_nc
# get pressure array from wrf
wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
if original_arg_type_str:
wrf_domain_file.close()
return wrf_lat, wrf_lon
def wrf_rename_files_fix_time_format(filename_original_list, original_character=':', replacement_character='_'):
for i_, filename_ in enumerate(filename_original_list):
p_progress_bar(i_, len(filename_original_list))
new_filename = filename_.replace(original_character,replacement_character)
os.rename(filename_, new_filename)
# meteorology
def calculate_saturation_vapor_pressure_wexler(T_array_K):
# result in mb (hPa)
G0 = -0.29912729E+4
G1 = -0.60170128E+4
G2 = 0.1887643854E+2
G3 = -0.28354721E-1
G4 = 0.17838301E-4
G5 = -0.84150417E-9
G6 = 0.44412543E-12
G7 = 0.2858487E+1
e_s = np.exp((G0 * (T_array_K ** -2)) +
(G1 * (T_array_K ** -1)) +
G2 +
(G3 * T_array_K) +
(G4 * (T_array_K ** 2)) +
(G5 * (T_array_K ** 3)) +
(G6 * (T_array_K ** 4)) +
(G7 * np.log(T_array_K)))
return e_s * 0.01
def calculate_saturation_mixing_ratio(P_array_mb, T_array_K):
e_s = calculate_saturation_vapor_pressure_wexler(T_array_K)
q_s = 621.97 * (e_s / (P_array_mb - e_s))
return q_s
def calculate_potential_temperature(T_array_K, P_array_hPa):
potential_temp = T_array_K * ((1000 / P_array_hPa) ** poisson_)
return potential_temp
def calculate_equivalent_potential_temperature(T_array_K, P_array_hPa, R_array_kg_over_kg):
P_o = 1000
T_e = T_array_K + (latent_heat_v * R_array_kg_over_kg / heat_capacity__Cp)
theta_e = T_e * ((P_o/P_array_hPa)**poisson_)
return theta_e
def calculate_temperature_from_potential_temperature(theta_array_K, P_array_hPa):
temperature_ = theta_array_K * ( (P_array_hPa/1000) ** poisson_ )
return temperature_
def calculate_mountain_height_from_sonde(sonde_dict):
"""
calculates H_hat from given values of u_array, v_array, T_array, effective_height, rh_array, q_array, p_array
"""
# Set initial conditions
height = 1000 # metres
# define arrays
WS_array = ws_knots_to_ms(sonde_dict['SKNT'])
U_array, V_array = polar_to_cart(sonde_dict['DRCT'], WS_array)
T_array = celsius_to_kelvin(sonde_dict['TEMP'])
RH_array = sonde_dict['RELH']
P_array = sonde_dict['PRES']
Z_array = sonde_dict['HGHT']
Q_array = sonde_dict['MIXR']/1000
TH_array = sonde_dict['THTA']
# calculated arrays
q_s = calculate_saturation_mixing_ratio(P_array, T_array)
e_ = gas_const_dry / gas_const_water
# gradients
d_ln_TH = np.gradient(np.log(TH_array))
d_z = np.gradient(Z_array)
d_q_s = np.gradient(q_s)
# Dry Brunt - Vaisala
N_dry = gravity_ * d_ln_TH / d_z
N_dry[RH_array >= 90] = 0
# Moist Brunt - Vaisala
term_1_1 = 1 + ( latent_heat_v * q_s / (gas_const_dry * T_array) )
term_1_2 = 1 + ( e_ * (latent_heat_v**2) * q_s / (heat_capacity__Cp * gas_const_dry * (T_array**2) ) )
term_2_1 = d_ln_TH / d_z
term_2_2 = latent_heat_v / (heat_capacity__Cp * T_array)
term_2_3 = d_q_s / d_z
term_3 = d_q_s / d_z # should be d_q_w but sonde data has no cloud water data
N_moist = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
N_moist[RH_array < 90] = 0
    # define output array; N_dry and N_moist above are already squared Brunt-Vaisala
    # frequencies (g * dlnTH/dz), consistent with the ERA5 and WRF versions below,
    # so they are summed, not squared again
    N_2 = N_dry + N_moist
H_hat_2 = N_2 * (height**2) / (U_array**2)
return H_hat_2
def calculate_mountain_height_from_era5(era5_pressures_filename, era5_surface_filename, point_lat, point_lon,
return_arrays=False, u_wind_mode='u', range_line_degrees=None,
time_start_str_YYYYmmDDHHMM='',time_stop_str_YYYYmmDDHHMM='',
reference_height=1000, return_debug_arrays=False):
"""
calculates H_hat from given values of u_array, v_array, T_array, effective_height, rh_array, q_array, p_array
u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line most not be none
if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
range_line_degrees: degress (decimals) from north, clockwise, of the mountain range line.
"""
# load files
era5_sur = nc.Dataset(era5_surface_filename, 'r')
era5_pre = nc.Dataset(era5_pressures_filename, 'r')
# check if times are the same for both files
dif_sum = np.sum(np.abs(era5_pre.variables['time'][:] - era5_sur.variables['time'][:]))
if dif_sum > 0:
print('Error, times in selected files are not the same')
return
# check if lat lon are the same for both files
dif_sum = np.sum(np.abs(era5_pre.variables['latitude'][:] - era5_sur.variables['latitude'][:]))
dif_sum = dif_sum + np.sum(np.abs(era5_pre.variables['longitude'][:] - era5_sur.variables['longitude'][:]))
if dif_sum > 0:
print('Error, latitude or longitude in selected files are not the same')
return
# find lat lon index
lat_index, lon_index = find_index_from_lat_lon(era5_sur.variables['latitude'][:],
era5_sur.variables['longitude'][:], [point_lat], [point_lon])
lat_index = lat_index[0]
lon_index = lon_index[0]
# copy arrays
time_array = time_era5_to_seconds(np.array(era5_sur.variables['time'][:]))
r_1 = 0
r_2 = -1
if time_start_str_YYYYmmDDHHMM != '':
r_1 = time_to_row_str(time_array, time_start_str_YYYYmmDDHHMM)
if time_stop_str_YYYYmmDDHHMM != '':
r_2 = time_to_row_str(time_array, time_stop_str_YYYYmmDDHHMM)
time_array = time_array[r_1:r_2]
sp_array = np.array(era5_sur.variables['sp'][r_1:r_2, lat_index, lon_index]) / 100 # hPa
P_array = np.array(era5_pre.variables['level'][:]) # hPa
if range_line_degrees is not None:
WD_, WS_ = cart_to_polar(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]).flatten(),
np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]).flatten())
WD_delta = WD_ - range_line_degrees
range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
U_array = range_normal_component.reshape((sp_array.shape[0], P_array.shape[0]))
else:
if u_wind_mode == 'u':
U_array = np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index])
else:
U_array = np.sqrt(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]) ** 2 +
np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]) ** 2)
T_array = np.array(era5_pre.variables['t'][r_1:r_2, :, lat_index, lon_index])
Q_L_array = np.array(era5_pre.variables['crwc'][r_1:r_2, :, lat_index, lon_index])
RH_array = np.array(era5_pre.variables['r'][r_1:r_2, :, lat_index, lon_index])
Z_array = np.array(era5_pre.variables['z'][r_1:r_2, :, lat_index, lon_index]) / gravity_
# calculate arrays
TH_array = np.zeros((time_array.shape[0], P_array.shape[0]), dtype=float)
for t_ in range(time_array.shape[0]):
TH_array[t_,:] = calculate_potential_temperature(T_array[t_,:], P_array[:])
# calculated arrays
q_s = calculate_saturation_mixing_ratio(P_array, T_array)
e_ = gas_const_dry / gas_const_water
# create output dict
H_hat_2 = {}
    # loop through time stamps
for t_ in range(time_array.shape[0]):
p_progress_bar(t_,time_array.shape[0])
# find surface pressure at this time stamp
surface_p = sp_array[t_]
# find pressure at 1000 meters
pressure_1000m = np.interp(reference_height, Z_array[t_, :], P_array)
pressure_1000m_index = np.argmin(np.abs(P_array - pressure_1000m))
# find extrapolations
ql_0 = np.interp(np.log(surface_p), np.log(P_array), Q_L_array[t_, :])
z__0 = np.interp(np.log(surface_p), np.log(P_array), Z_array[t_, :])
th_0 = np.interp(np.log(surface_p), np.log(P_array), TH_array[t_, :])
qs_0 = np.interp(np.log(surface_p), np.log(P_array), q_s[t_, :])
t__1000 = np.interp(reference_height, Z_array[t_, :], T_array[t_, :])
u__1000 = np.interp(reference_height, Z_array[t_, :], U_array[t_, :])
ql_1000 = np.interp(reference_height, Z_array[t_, :], Q_L_array[t_, :])
z__1000 = reference_height
th_1000 = np.interp(reference_height, Z_array[t_, :], TH_array[t_, :])
qs_1000 = np.interp(reference_height, Z_array[t_, :], q_s[t_, :])
# gradients
d_ln_TH = np.log(th_1000) - np.log(th_0)
d_z = z__1000 - z__0
d_q_s = qs_1000 - qs_0
d_q_w = (d_q_s) + (ql_1000 - ql_0)
# Brunt - Vaisala
if np.max(RH_array[t_, pressure_1000m_index:])>= 90:
# Moist
term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
(heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
term_2_1 = d_ln_TH / d_z
term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
term_2_3 = d_q_s / d_z
term_3 = d_q_w / d_z
N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
else:
# Dry
N_2 = gravity_ * d_ln_TH / d_z
# populate each time stamp
H_hat_2[time_array[t_]] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
era5_sur.close()
era5_pre.close()
if return_arrays:
H_hat_2_time = sorted(H_hat_2.keys())
H_hat_2_time = np.array(H_hat_2_time)
H_hat_2_vals = np.zeros(H_hat_2_time.shape[0], dtype=float)
for r_ in range(H_hat_2_time.shape[0]):
H_hat_2_vals[r_] = H_hat_2[H_hat_2_time[r_]]
        if return_debug_arrays:
            # note: N_2 and u__1000 hold values from the last time stamp only
            return H_hat_2_time, H_hat_2_vals, N_2, u__1000 ** 2
else:
return H_hat_2_time, H_hat_2_vals
else:
return H_hat_2
def calculate_mountain_height_from_WRF(filename_SP, filename_PR,
filename_UU, filename_VV,
filename_TH, filename_QR,
filename_QV, filename_PH,
return_arrays=False, u_wind_mode='u', range_line_degrees=None,
reference_height=1000):
"""
calculates H_hat from WRF point output text files
u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line most not be none
if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
range_line_degrees: degress (decimals) from north, clockwise, of the mountain range line.
:param filename_SP: fullpath filename of surface pressure file
:param filename_PR: fullpath filename of pressure file
:param filename_UU: fullpath filename of u wind file
:param filename_VV: fullpath filename of v wind file
:param filename_TH: fullpath filename of potential temperature file
:param filename_QR: fullpath filename of rain water mixing ratio file
:param filename_QV: fullpath filename of Water vapor mixing ratio file
:param filename_PH: fullpath filename of geopotential height file
:param return_arrays: if true, will return also brunt vaisalla and wind component squared
:param u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line most not be none
:param range_line_degrees: if not None, u_wind_mode will automatically be set to normal_to_range
:param reference_height: mean height of mountain range
:return:
H_hat_2
"""
# load arrays from text
SP_array = genfromtxt(filename_SP, dtype=float, skip_header=1)[:,9] / 100 # hPa
PR_array = genfromtxt(filename_PR, dtype=float, skip_header=1)[:,1:] / 100 # hPa
UU_array = genfromtxt(filename_UU, dtype=float, skip_header=1)[:,1:]
VV_array = genfromtxt(filename_VV, dtype=float, skip_header=1)[:,1:]
TH_array = genfromtxt(filename_TH, dtype=float, skip_header=1)[:,1:]
QR_array = genfromtxt(filename_QR, dtype=float, skip_header=1)[:,1:]
QV_array = genfromtxt(filename_QV, dtype=float, skip_header=1)[:,1:]
Z_array = genfromtxt(filename_PH, dtype=float, skip_header=1)[:,1:] # already in meters
# calculate arrays
if range_line_degrees is not None:
WD_, WS_ = cart_to_polar(UU_array.flatten(), VV_array.flatten())
WD_delta = WD_ - range_line_degrees
range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
U_array = range_normal_component.reshape((UU_array.shape[0], UU_array.shape[1]))
else:
if u_wind_mode == 'u':
U_array = UU_array
else:
U_array = np.sqrt(UU_array ** 2 + VV_array ** 2)
T_array = calculate_temperature_from_potential_temperature(TH_array, PR_array)
RH_array = calculate_RH_from_QV_T_P(QV_array, T_array, PR_array*100)
q_s = calculate_saturation_mixing_ratio(PR_array, T_array)
e_ = gas_const_dry / gas_const_water
# create output array
H_hat_2 = np.zeros(PR_array.shape[0], dtype=float)
    # loop through time stamps
for r_ in range(PR_array.shape[0]):
p_progress_bar(r_, PR_array.shape[0])
# find surface pressure at this time stamp
surface_p = SP_array[r_]
# find pressure at 1000 meters
pressure_1000m = np.interp(reference_height, Z_array[r_, :], PR_array[r_, :])
pressure_1000m_index = np.argmin(np.abs(PR_array[r_, :] - pressure_1000m))
# find extrapolations
ql_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), QR_array[r_, :])
z__0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), Z_array[r_, :])
th_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), TH_array[r_, :])
qs_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), q_s[r_, :])
t__1000 = np.interp(reference_height, Z_array[r_, :], T_array[r_, :])
u__1000 = np.interp(reference_height, Z_array[r_, :], U_array[r_, :])
ql_1000 = np.interp(reference_height, Z_array[r_, :], QR_array[r_, :])
z__1000 = reference_height
th_1000 = np.interp(reference_height, Z_array[r_, :], TH_array[r_, :])
qs_1000 = np.interp(reference_height, Z_array[r_, :], q_s[r_, :])
# gradients
d_ln_TH = np.log(th_1000) - np.log(th_0)
d_z = z__1000 - z__0
d_q_s = qs_1000 - qs_0
d_q_w = (d_q_s) + (ql_1000 - ql_0)
# Brunt - Vaisala
if np.max(RH_array[r_, pressure_1000m_index:])>= 90:
# Moist
term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
(heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
term_2_1 = d_ln_TH / d_z
term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
term_2_3 = d_q_s / d_z
term_3 = d_q_w / d_z
N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
else:
# Dry
N_2 = gravity_ * d_ln_TH / d_z
# populate each time stamp
H_hat_2[r_] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
if return_arrays:
return H_hat_2, N_2, u__1000 ** 2
else:
return H_hat_2
def calculate_dewpoint_from_T_RH(T_, RH_):
"""
from Magnus formula, using Bolton's constants
:param T_: ambient temperature [Celsius]
:param RH_: relative humidity
:return: Td_ dew point temperature [celsius]
"""
a = 6.112
b = 17.67
c = 243.5
y_ = np.log(RH_/100) + ((b*T_)/(c+T_))
Td_ = (c * y_) / (b - y_)
return Td_
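# Example (illustrative): at 25 C and 60 % RH the dew point is about 16.7 C.
# >>> round(calculate_dewpoint_from_T_RH(25., 60.), 1)
# 16.7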
def calculate_RH_from_QV_T_P(arr_qvapor, arr_temp_K, arr_press_Pa):
tv_ = 6.11 * e_constant**((2500000/461) * ((1/273) - (1/arr_temp_K)))
pv_ = arr_qvapor * (arr_press_Pa/100) / (arr_qvapor + 0.622)
return np.array(100 * pv_ / tv_)
def calculate_profile_input_for_cluster_analysis_from_ERA5(p_profile, t_profile, td_profile, q_profile,
u_profile, v_profile, h_profile, surface_p):
"""
takes data from ERA5 for only one time stamp for all pressure levels from 250 to 1000 hPa
:param p_profile: in hPa
:param t_profile: in Celsius
:param td_profile: in Celsius
:param q_profile: in kg/kg
:param u_profile: in m/s
:param v_profile: in m/s
:param h_profile: in m
:param surface_p: in hPa
:return: surface_p, qv_, qu_, tw_, sh_, tt_
"""
# trim profiles from surface to top
# find which levels should be included
levels_total = 0
for i_ in range(p_profile.shape[0]):
if p_profile[i_] > surface_p:
break
levels_total += 1
####################################### find extrapolations
surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
surface_q = np.interp(np.log(surface_p), np.log(p_profile), q_profile)
surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
# create temp arrays
T_array = np.zeros(levels_total + 1, dtype=float)
Td_array = np.zeros(levels_total + 1, dtype=float)
Q_array = np.zeros(levels_total + 1, dtype=float)
U_array = np.zeros(levels_total + 1, dtype=float)
V_array = np.zeros(levels_total + 1, dtype=float)
H_array = np.zeros(levels_total + 1, dtype=float)
P_array = np.zeros(levels_total + 1, dtype=float)
T_array[:levels_total] = t_profile[:levels_total]
Td_array[:levels_total] = td_profile[:levels_total]
Q_array[:levels_total] = q_profile[:levels_total]
U_array[:levels_total] = u_profile[:levels_total]
V_array[:levels_total] = v_profile[:levels_total]
H_array[:levels_total] = h_profile[:levels_total]
P_array[:levels_total] = p_profile[:levels_total]
T_array[-1] = surface_t
Td_array[-1] = surface_td
Q_array[-1] = surface_q
U_array[-1] = surface_u
V_array[-1] = surface_v
H_array[-1] = surface_h
P_array[-1] = surface_p
######################################
r_850 = np.argmin(np.abs(P_array - 850))
r_500 = np.argmin(np.abs(P_array - 500))
dp_ = np.abs(np.gradient(P_array))
tt_ = (T_array[r_850] - (2 * T_array[r_500]) + Td_array[r_850])
qu_ = np.sum(Q_array * U_array * dp_) / gravity_
qv_ = np.sum(Q_array * V_array * dp_) / gravity_
tw_ = np.sum(Q_array * dp_) / gravity_
del_u = U_array[r_850] - U_array[r_500]
del_v = V_array[r_850] - V_array[r_500]
del_z = H_array[r_850] - H_array[r_500]
sh_ = ((del_u / del_z) ** 2 + (del_v / del_z) ** 2) ** 0.5
return surface_p, qv_, qu_, tw_, sh_, tt_
def barometric_equation(presb_pa, tempb_k, deltah_m, Gamma=-0.0065):
"""The barometric equation models the change in pressure with
height in the atmosphere.
INPUTS:
presb_k (pa): The base pressure
tempb_k (K): The base temperature
deltah_m (m): The height differential between the base height and the
desired height
Gamma [=-0.0065]: The atmospheric lapse rate
OUTPUTS
pres (pa): Pressure at the requested level
REFERENCE:
http://en.wikipedia.org/wiki/Barometric_formula
"""
return presb_pa * \
(tempb_k/(tempb_k+Gamma*deltah_m))**(grav*m_a/(Rstar_a*Gamma))
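# Example (illustrative): from sea-level standard conditions (101325 Pa, 288.15 K),
# the pressure 1000 m higher is about 899 hPa.
# >>> barometric_equation(101325., 288.15, 1000.)  # -> approx. 89874 Pa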
def barometric_equation_inv(heightb_m, tempb_k, presb_pa,
prest_pa, Gamma=-0.0065):
"""The barometric equation models the change in pressure with height in
the atmosphere. This function returns altitude given
initial pressure and base altitude, and pressure change.
    INPUTS:
    heightb_m (m): The base height
    tempb_k (K): The base temperature
    presb_pa (Pa): The base pressure
    prest_pa (Pa): The pressure at the desired height
    Gamma [=-0.0065]: The atmospheric lapse rate
    OUTPUTS
    heightt_m (m): Altitude at the requested pressure
REFERENCE:
http://en.wikipedia.org/wiki/Barometric_formula
"""
return heightb_m + \
tempb_k * ((presb_pa/prest_pa)**(Rstar_a*Gamma/(grav*m_a))-1) / Gamma
def Theta(tempk, pres, pref=100000.):
"""Potential Temperature
INPUTS:
tempk (K)
pres (Pa)
pref: Reference pressure (default 100000 Pa)
OUTPUTS: Theta (K)
Source: Wikipedia
Prints a warning if a pressure value below 2000 Pa input, to ensure
that the units were input correctly.
"""
try:
minpres = min(pres)
except TypeError:
minpres = pres
if minpres < 2000:
print("WARNING: P<2000 Pa; did you input a value in hPa?")
return tempk * (pref/pres)**(Rs_da/Cp_da)
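# Example (illustrative): 288.15 K air at 900 hPa has a potential temperature near 297 K.
# >>> Theta(288.15, 90000.)  # -> approx. 297.0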
def TempK(theta, pres, pref=100000.):
"""Inverts Theta function."""
try:
minpres = min(pres)
except TypeError:
minpres = pres
if minpres < 2000:
print("WARNING: P<2000 Pa; did you input a value in hPa?")
return theta * (pres/pref)**(Rs_da/Cp_da)
def ThetaE(tempk, pres, e):
"""Calculate Equivalent Potential Temperature
for lowest model level (or surface)
INPUTS:
tempk: Temperature [K]
pres: Pressure [Pa]
e: Water vapour partial pressure [Pa]
OUTPUTS:
theta_e: equivalent potential temperature
References:
Eq. (9.40) from Holton (2004)
Eq. (22) from Bolton (1980)
<NAME> and <NAME> (2013), 'Land-Ocean Warming
Contrast over a Wide Range of Climates: Convective Quasi-Equilibrium
Theory and Idealized Simulations', J. Climate """
# tempc
tempc = tempk - degCtoK
# Calculate theta
theta = Theta(tempk, pres)
# T_lcl formula needs RH
es = VaporPressure(tempc)
RH = 100. * e / es
# theta_e needs q (water vapour mixing ratio)
qv = MixRatio(e, pres)
# Calculate the temp at the Lifting Condensation Level
T_lcl = ((tempk-55)*2840 / (2840-(np.log(RH/100)*(tempk-55)))) + 55
# print "T_lcl :%.3f"%T_lcl
# DEBUG STUFF ####
theta_l = tempk * \
(100000./(pres-e))**(Rs_da/Cp_da)*(tempk/T_lcl)**(0.28*qv)
# print "theta_L: %.3f"%theta_l
# Calculate ThetaE
theta_e = theta_l * np.exp((Lv * qv) / (Cp_da * T_lcl))
return theta_e
def ThetaE_Bolton(tempk, pres, e, pref=100000.):
"""Theta_E following Bolton (1980)
INPUTS:
tempk: Temperature [K]
pres: Pressure [Pa]
e: Water vapour partial pressure [Pa]
See http://en.wikipedia.org/wiki/Equivalent_potential_temperature
"""
# Preliminary:
T = tempk
qv = MixRatio(e, pres)
Td = DewPoint(e) + degCtoK
kappa_d = Rs_da / Cp_da
# Calculate TL (temp [K] at LCL):
TL = 56 + ((Td-56.)**-1+(np.log(T/Td)/800.))**(-1)
# print "TL: %.3f"%TL
# Calculate Theta_L:
thetaL = T * (pref/(pres-e))**kappa_d*(T/TL)**(0.28*qv)
# print "theta_L: %.3f"%thetaL
# put it all together to get ThetaE
thetaE = thetaL * np.exp((3036./TL-0.78)*qv*(1+0.448*qv))
return thetaE
def ThetaV(tempk, pres, e):
"""Virtual Potential Temperature
INPUTS
tempk (K)
pres (Pa)
e: Water vapour pressure (Pa) (Optional)
OUTPUTS
theta_v : Virtual potential temperature
"""
mixr = MixRatio(e, pres)
theta = Theta(tempk, pres)
return theta * (1+mixr/Epsilon) / (1+mixr)
def GammaW(tempk, pres):
"""Function to calculate the moist adiabatic lapse rate (deg C/Pa) based
on the environmental temperature and pressure.
INPUTS:
tempk (K)
pres (Pa)
RH (%)
RETURNS:
GammaW: The moist adiabatic lapse rate (Deg C/Pa)
REFERENCE:
http://glossary.ametsoc.org/wiki/Moist-adiabatic_lapse_rate
(Note that I multiply by 1/(grav*rho) to give MALR in deg/Pa)
"""
tempc = tempk-degCtoK
es = VaporPressure(tempc)
ws = MixRatio(es, pres)
# tempv=VirtualTempFromMixR(tempk,ws)
tempv = VirtualTemp(tempk, pres, es)
latent = Latentc(tempc)
Rho = pres / (Rs_da*tempv)
# This is the previous implementation:
# A=1.0+latent*ws/(Rs_da*tempk)
# B=1.0+Epsilon*latent*latent*ws/(Cp_da*Rs_da*tempk*tempk)
# Gamma=(A/B)/(Cp_da*Rho)
# This is algebraically identical but a little clearer:
A = -1. * (1.0+latent*ws/(Rs_da*tempk))
B = Rho * (Cp_da+Epsilon*latent*latent*ws/(Rs_da*tempk*tempk))
Gamma = A / B
return Gamma
def DensHumid(tempk, pres, e):
"""Density of moist air.
This is a bit more explicit and less confusing than the method below.
INPUTS:
tempk: Temperature (K)
pres: static pressure (Pa)
mixr: mixing ratio (kg/kg)
OUTPUTS:
rho_air (kg/m^3)
SOURCE: http://en.wikipedia.org/wiki/Density_of_air
"""
pres_da = pres - e
rho_da = pres_da / (Rs_da * tempk)
rho_wv = e/(Rs_v * tempk)
return rho_da + rho_wv
def Density(tempk, pres, mixr):
"""Density of moist air
INPUTS:
tempk: Temperature (K)
pres: static pressure (Pa)
mixr: mixing ratio (kg/kg)
OUTPUTS:
rho_air (kg/m^3)
"""
virtualT = VirtualTempFromMixR(tempk, mixr)
return pres / (Rs_da * virtualT)
def VirtualTemp(tempk, pres, e):
"""Virtual Temperature
INPUTS:
tempk: Temperature (K)
e: vapour pressure (Pa)
p: static pressure (Pa)
OUTPUTS:
tempv: Virtual temperature (K)
SOURCE: hmmmm (Wikipedia)."""
tempvk = tempk / (1-(e/pres)*(1-Epsilon))
return tempvk
def VirtualTempFromMixR(tempk, mixr):
"""Virtual Temperature
INPUTS:
tempk: Temperature (K)
mixr: Mixing Ratio (kg/kg)
OUTPUTS:
tempv: Virtual temperature (K)
    SOURCE: hmmmm (Wikipedia). This is a first-order approximation
    in the mixing ratio.
    """
return tempk * (1.0+0.6*mixr)
def Latentc(tempc):
"""Latent heat of condensation (vapourisation)
INPUTS:
tempc (C)
OUTPUTS:
L_w (J/kg)
SOURCE:
http://en.wikipedia.org/wiki/Latent_heat#Latent_heat_for_condensation_of_water
"""
return 1000 * (2500.8 - 2.36*tempc + 0.0016*tempc**2 - 0.00006*tempc**3)
def VaporPressure(tempc, phase="liquid"):
"""Water vapor pressure over liquid water or ice.
INPUTS:
tempc: (C) OR dwpt (C), if SATURATION vapour pressure is desired.
phase: ['liquid'],'ice'. If 'liquid', do simple dew point. If 'ice',
return saturation vapour pressure as follows:
Tc>=0: es = es_liquid
Tc <0: es = es_ice
RETURNS: e_sat (Pa)
SOURCE: http://cires.colorado.edu/~voemel/vp.html (#2:
CIMO guide (WMO 2008), modified to return values in Pa)
This formulation is chosen because of its appealing simplicity,
but it performs very well with respect to the reference forms
at temperatures above -40 C. At some point I'll implement Goff-Gratch
(from the same resource).
"""
over_liquid = 6.112 * np.exp(17.67*tempc/(tempc+243.12))*100.
over_ice = 6.112 * np.exp(22.46*tempc/(tempc+272.62))*100.
# return where(tempc<0,over_ice,over_liquid)
if phase == "liquid":
# return 6.112*exp(17.67*tempc/(tempc+243.12))*100.
return over_liquid
elif phase == "ice":
# return 6.112*exp(22.46*tempc/(tempc+272.62))*100.
return np.where(tempc < 0, over_ice, over_liquid)
else:
raise NotImplementedError
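# Example (illustrative): saturation vapour pressure over liquid water at 20 C
# is about 2340 Pa (23.4 hPa).
# >>> VaporPressure(20.)  # -> approx. 2341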
def SatVap(dwpt, phase="liquid"):
"""This function is deprecated, return ouput from VaporPres"""
print("WARNING: This function is deprecated, please use VaporPressure()" +
" instead, with dwpt as argument")
return VaporPressure(dwpt, phase)
def MixRatio(e, p):
"""Mixing ratio of water vapour
INPUTS
e (Pa) Water vapor pressure
p (Pa) Ambient pressure
RETURNS
    qv (kg kg^-1) Water vapor mixing ratio
"""
return Epsilon * e / (p - e)
def MixR2VaporPress(qv, p):
"""Return Vapor Pressure given Mixing Ratio and Pressure
INPUTS
    qv (kg kg^-1) Water vapor mixing ratio
p (Pa) Ambient pressure
RETURNS
e (Pa) Water vapor pressure
"""
return qv * p / (Epsilon + qv)
def DewPoint(e):
""" Use Bolton's (1980, MWR, p1047) formulae to find tdew.
INPUTS:
e (Pa) Water Vapor Pressure
OUTPUTS:
Td (C)
"""
ln_ratio = np.log(e/611.2)
Td = ((17.67-ln_ratio)*degCtoK+243.5*ln_ratio)/(17.67-ln_ratio)
return Td - degCtoK
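# Example (illustrative): a vapour pressure of 1700 Pa corresponds to a dew point near 15 C.
# >>> DewPoint(1700.)  # -> approx. 15.0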
def WetBulb(tempc, RH):
"""Stull (2011): Wet-Bulb Temperature from Relative Humidity and Air
Temperature.
INPUTS:
tempc (C)
RH (%)
OUTPUTS:
tempwb (C)
"""
Tw = tempc * np.arctan(0.151977*(RH+8.313659)**0.5) + \
np.arctan(tempc+RH) - np.arctan(RH-1.676331) + \
0.00391838*RH**1.5*np.arctan(0.023101*RH) - \
4.686035
return Tw
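# Example (illustrative): reproduces the Stull (2011) check value, 20 C at 50 % RH.
# >>> round(WetBulb(20., 50.), 1)
# 13.7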
# unit conversions
def convert_unit_and_save_data_ppb_ugm3(filename_, station_name):
# https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
# http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
parameters_unit_scaling = {'11' : 1.96, # O3
'10' : 1.25, # NO
'9' : 1.88, # NO2
'16' : 2.62, # SO2
'8' : 1.15} # CO
new_unit_name = '[$\mu$g/m$^3$]'
parameter_name_mod = {'9' : 'NO$_2$',
'11' : 'O$_3$',
'12' : 'PM$_1$$_0$',
'13' : 'PM$_2$$_.$$_5$',
'7' : 'CO$_2$',
'16' : 'SO$_2$',
}
# station_name = 'QF_01'
data_array = open_csv_file(filename_)
current_header = data_array[0,:]
new_header = np.array(current_header)
v_current = np.array(data_array[1:,:],dtype=float)
v_new = np.array(v_current)
for keys_ in parameters_unit_scaling.keys():
v_new[:, int(keys_)] = v_current[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
# add station name suffix
for i_ in range(5,22):
if str(i_) in parameter_name_mod.keys():
parameter_name = parameter_name_mod[str(i_)]
else:
parameter_name = current_header[i_].split('_')[0]
if str(i_) in parameters_unit_scaling.keys():
parameter_unit = new_unit_name
else:
parameter_unit = current_header[i_].split('_')[1]
new_header[i_] = station_name + '_' + parameter_name + '_' + parameter_unit
data_array[1:,:] = v_new
data_array[0,:] = new_header
filename_new = filename_.split('\\')[-1].split('.')[0] + '_unit_converted.csv'
current_filename_without_path = filename_.split('\\')[-1]
current_filename_path = filename_[:-len(current_filename_without_path)]
numpy_save_txt(current_filename_path + filename_new, data_array)
print('done!')
def save_data_with_unit_conversion_ppb_ugm3(file_list_path):
file_list = sorted(glob.glob(str(file_list_path + '\\' + '*.csv')))
# https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
# http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
parameters_unit_scaling = {'12' : 1.96, # O3
'13' : 1.25, # NO
'14' : 1.88, # NO2
'15' : 2.62, # SO2
'16' : 1.15} # CO
parameters_new_names = ['YYYY', # 0
'MM', # 1
'DD', # 2
'HH', # 3
'mm', # 4
'Day of the week', # 5
'WD degrees', # 6
'WS m/s', # 7
'Temp Celsius', # 8
'RH %', # 9
'SR W/m2', # 10
'ATP mbar', # 11
'O3 ug/m3', # 12
'NO ug/m3', # 13
'NO2 ug/m3', # 14
'SO2 ug/m3', # 15
'CO mg/m3', # 16
'CO2 ppm', # 17
'PM10 ug/m3', # 18
'PM2.5 ug/m3', # 19
'THC ppm', # 20
'Rain mm', # 21
'Ox ppb', # 22
'NOx ppb'] # 23
for month_ in range(1,13):
print(month_)
filename_old = file_list[month_ -1]
data_array = open_csv_file(file_list[month_ -1])
v_ppb = np.array(data_array[1:,:],dtype=float)
v_ug_m3 = np.array(v_ppb)
for keys_ in parameters_unit_scaling.keys():
v_ug_m3[:, int(keys_)] = v_ppb[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
data_array[0, :] = parameters_new_names
data_array[1:,:] = v_ug_m3
filename_new = filename_old.split('\\')[-1].split('.')[0] + '_ugm3.csv'
numpy_save_txt(file_list_path + '\\' + filename_new, data_array)
print('done!')
def RH_to_abs_conc(arr_RH,arr_T):
a_ = 1-(373.15/arr_T)
c_1 = 13.3185
c_2 = -1.97
c_3 = -.6445
c_4 = -.1299
Po_H2O = 1013.25 * e_constant ** ((c_1 * (a_**1)) +
(c_2 * (a_**2)) +
(c_3 * (a_**3)) +
(c_4 * (a_**4)) ) # mbar
return (arr_RH * Po_H2O) / (100 * boltzmann_ * arr_T)
def Mixing_Ratio_to_molecules_per_cm3(arr_MR, ATP_mbar, Temp_C):
arr_temp = Temp_C + 273.15 # kelvin
arr_Molec_per_cm3 = arr_MR * ( ATP_mbar / ( boltzmann_ * arr_temp ) ) # molecules / cm3
return arr_Molec_per_cm3
def molecules_per_cm3_to_Mixing_Ratio(arr_Molec_per_cm3, ATP_mbar, Temp_C):
arr_temp = Temp_C + 273.15 # kelvin
arr_MR = (arr_Molec_per_cm3 * boltzmann_ * arr_temp) / ATP_mbar
return arr_MR
def ws_knots_to_ms(arr_):
return arr_ * .514444
def ws_ms_to_knots(arr_):
return arr_ / .514444
def kelvin_to_celsius(arr_temp_k):
return arr_temp_k - 273.15
def celsius_to_kelvin(arr_temp_c):
return arr_temp_c + 273.15
# geo reference
def find_index_from_lat_lon(series_lat, series_lon, point_lat_list, point_lon_list):
lat_index_list = []
lon_index_list = []
# mask arrays
lat_m = series_lat
lon_m = series_lon
if np.sum(lat_m) != np.sum(lat_m) or np.sum(lon_m) != np.sum(lon_m):
lat_m = np.ma.masked_where(np.isnan(lat_m), lat_m)
lat_m = np.ma.masked_where(np.isinf(lat_m), lat_m)
lon_m = np.ma.masked_where(np.isnan(lon_m), lon_m)
lon_m = np.ma.masked_where(np.isinf(lon_m), lon_m)
if type(point_lat_list) == tuple or type(point_lat_list) == list:
for lat_ in point_lat_list:
lat_index_list.append(np.argmin(np.abs(lat_m - lat_)))
for lon_ in point_lon_list:
lon_index_list.append(np.argmin(np.abs(lon_m - lon_)))
else:
lat_index_list = np.argmin(np.abs(lat_m - point_lat_list))
lon_index_list = np.argmin(np.abs(lon_m - point_lon_list))
return lat_index_list, lon_index_list
def find_index_from_lat_lon_2D_arrays(lat_arr, lon_arr, point_lat, point_lon):
lat_del_arr = lat_arr - point_lat
lon_del_arr = lon_arr - point_lon
dist_arr = ( lat_del_arr**2 + lon_del_arr**2 )**0.5
return find_min_index_2d_array(dist_arr)
def find_index_from_lat_lon_1D_arrays(lat_arr, lon_arr, point_lat, point_lon):
lat_del_arr = lat_arr - point_lat
lon_del_arr = lon_arr - point_lon
dist_arr = ( lat_del_arr**2 + lon_del_arr**2 )**0.5
return find_min_index_1d_array(dist_arr)
def distance_array_lat_lon_2D_arrays_degrees(lat_arr, lon_arr, point_lat, point_lon):
lat_del_arr = lat_arr - point_lat
lon_del_arr = lon_arr - point_lon
return ( lat_del_arr**2 + lon_del_arr**2 )**0.5
def meter_per_degrees(lat_point):
lat_mean_rad = np.deg2rad(np.abs(lat_point))
m_per_deg_lat = 111132.954 - 559.822 * np.cos(2 * lat_mean_rad) + 1.175 * np.cos(4 * lat_mean_rad)
m_per_deg_lon = 111132.954 * np.cos(lat_mean_rad)
return np.abs(m_per_deg_lat), np.abs(m_per_deg_lon)
def degrees_per_meter(lat_point):
m_per_deg_lat, m_per_deg_lon = meter_per_degrees(lat_point)
return 1/m_per_deg_lat, 1/m_per_deg_lon
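# Example (illustrative): at 45 degrees latitude one degree of latitude spans about
# 111.1 km while one degree of longitude spans about 78.6 km.
# >>> meter_per_degrees(45.)  # -> (approx. 111131.8, approx. 78582.9)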
def distance_array_lat_lon_2D_arrays_degress_to_meters(lat_arr, lon_arr, point_lat, point_lon):
    m_per_deg_lat, m_per_deg_lon = meter_per_degrees(np.nanmean(lat_arr))
    # scale the degree offsets to meters and return the euclidean distance;
    # this completion is inferred from the degrees-based version above
    lat_del_arr_m = (lat_arr - point_lat) * m_per_deg_lat
    lon_del_arr_m = (lon_arr - point_lon) * m_per_deg_lon
    return (lat_del_arr_m**2 + lon_del_arr_m**2)**0.5
import math
import warnings
import numpy as np
from scipy import stats
from statsmodels.tsa import stattools
from scipy.interpolate import interp1d
from scipy.stats import chi2
from .Base import Base
from .features.irregular_autoregressive import IAR_phi, CIAR_phiR_beta
from .features.structure_function import SF_ML_amplitude, SF_ML_gamma
from .features.damped_random_walk import GP_DRW_sigma, GP_DRW_tau
from .features.harmonics import Harmonics
from .features.periods import Period_fit_v2, PeriodPowerRate, PeriodLS_v2
from .features.conditional_autoregressive import CAR_mean, CAR_sigma, CAR_tau
class Amplitude(Base):
"""Half the difference between the maximum and the minimum magnitude"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
n = len(magnitude)
sorted_mag = np.sort(magnitude)
return (np.median(sorted_mag[int(-math.ceil(0.05 * n)):]) -
np.median(sorted_mag[0:int(math.ceil(0.05 * n))])) / 2.0
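# Usage sketch (hedged): feature classes receive a shared_data object from the
# surrounding pipeline (its structure is assumed here) and fit() takes the data
# list declared in self.Data, magnitudes first.
# >>> amp = Amplitude(shared_data)
# >>> amp.fit([np.array([10.0, 10.4, 11.1, 11.9, 12.0])])  # -> 1.0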
class Rcs(Base):
"""Range of cumulative sum"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sigma = np.std(magnitude)
N = len(magnitude)
m = np.mean(magnitude)
s = np.cumsum(magnitude - m) * 1.0 / (N * sigma)
R = np.max(s) - np.min(s)
return R
class StetsonK(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'error']
def fit(self, data):
magnitude = data[0]
error = data[2]
n = len(magnitude)
mean_mag = (np.sum(magnitude/(error*error))/np.sum(1.0 / (error * error)))
sigmap = (np.sqrt(n * 1.0 / (n - 1)) *
(magnitude - mean_mag) / error)
k = (1 / np.sqrt(n * 1.0) * np.sum(np.abs(sigmap)) / np.sqrt(np.sum(sigmap ** 2)))
return k
class Meanvariance(Base):
"""variability index"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
return np.std(magnitude) / np.mean(magnitude)
class Autocor_length(Base):
def __init__(self, shared_data, lags=100):
super().__init__(shared_data)
self.Data = ['magnitude']
self.nlags = lags
def fit(self, data):
magnitude = data[0]
ac = stattools.acf(magnitude, nlags=self.nlags, fft=False)
k = next((index for index, value in
enumerate(ac) if value < np.exp(-1)), None)
while k is None:
if self.nlags > len(magnitude):
warnings.warn('Setting autocorrelation length as light curve length')
return len(magnitude)
self.nlags = self.nlags + 100
ac = stattools.acf(magnitude, nlags=self.nlags, fft=False)
k = next((index for index, value in
enumerate(ac) if value < np.exp(-1)), None)
return k
class SlottedA_length(Base):
def __init__(self, shared_data, T=-99):
"""
lc: MACHO lightcurve in a pandas DataFrame
k: lag (default: 1)
T: tau (slot size in days. default: 4)
"""
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
SlottedA_length.SAC = []
self.T = T
def slotted_autocorrelation(self, data, time, T, K,
second_round=False, K1=100):
slots = np.zeros((K, 1))
i = 1
# make time start from 0
time = time - np.min(time)
# subtract mean from mag values
m = np.mean(data)
data = data - m
prod = np.zeros((K, 1))
pairs = np.subtract.outer(time, time)
pairs[np.tril_indices_from(pairs)] = 10000000
ks = np.int64(np.floor(np.abs(pairs) / T + 0.5))
# We calculate the slotted autocorrelation for k=0 separately
idx = np.where(ks == 0)
prod[0] = ((sum(data ** 2) + sum(data[idx[0]] *
data[idx[1]])) / (len(idx[0]) + len(data)))
slots[0] = 0
# We calculate it for the rest of the ks
if second_round is False:
for k in np.arange(1, K):
idx = np.where(ks == k)
if len(idx[0]) != 0:
prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
slots[i] = k
i = i + 1
else:
prod[k] = np.infty
else:
for k in np.arange(K1, K):
idx = np.where(ks == k)
if len(idx[0]) != 0:
prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
slots[i - 1] = k
i = i + 1
else:
prod[k] = np.infty
np.trim_zeros(prod, trim='b')
slots = np.trim_zeros(slots, trim='b')
return prod / prod[0], np.int64(slots).flatten()
def fit(self, data):
magnitude = data[0]
time = data[1]
N = len(time)
if self.T == -99:
deltaT = time[1:] - time[:-1]
sorted_deltaT = np.sort(deltaT)
self.T = sorted_deltaT[int(N * 0.05)+1]
K = 100
[SAC, slots] = self.slotted_autocorrelation(magnitude, time, self.T, K)
SAC2 = SAC[slots]
SlottedA_length.autocor_vector = SAC2
k = next((index for index, value in
enumerate(SAC2) if value < np.exp(-1)), None)
while k is None:
K = K+K
if K > (np.max(time) - np.min(time)) / self.T:
break
else:
[SAC, slots] = self.slotted_autocorrelation(magnitude,
time, self.T, K,
second_round=True,
K1=K/2)
SAC2 = SAC[slots]
k = next((index for index, value in
enumerate(SAC2) if value < np.exp(-1)), None)
return slots[k] * self.T
def getAtt(self):
return SlottedA_length.autocor_vector
class StetsonK_AC(SlottedA_length):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'error']
def fit(self, data):
try:
a = StetsonK_AC(self.shared_data)
autocor_vector = a.getAtt()
N_autocor = len(autocor_vector)
sigmap = (np.sqrt(N_autocor * 1.0 / (N_autocor - 1)) *
(autocor_vector - np.mean(autocor_vector)) /
np.std(autocor_vector))
K = (1 / np.sqrt(N_autocor * 1.0) *
np.sum(np.abs(sigmap)) / np.sqrt(np.sum(sigmap ** 2)))
return K
        except AttributeError:
            print("error: please run SlottedA_length first to generate values for StetsonK_AC")
class StetsonL(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'error', 'magnitude2', 'error2']
def fit(self, data):
aligned_magnitude = data[4]
aligned_magnitude2 = data[5]
aligned_error = data[7]
aligned_error2 = data[8]
N = len(aligned_magnitude)
mean_mag = (np.sum(aligned_magnitude/(aligned_error*aligned_error)) /
np.sum(1.0 / (aligned_error * aligned_error)))
mean_mag2 = (np.sum(aligned_magnitude2/(aligned_error2*aligned_error2)) /
np.sum(1.0 / (aligned_error2 * aligned_error2)))
sigmap = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude[:N] - mean_mag) /
aligned_error)
sigmaq = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude2[:N] - mean_mag2) /
aligned_error2)
sigma_i = sigmap * sigmaq
J = (1.0 / len(sigma_i) *
np.sum(np.sign(sigma_i) * np.sqrt(np.abs(sigma_i))))
K = (1 / np.sqrt(N * 1.0) *
np.sum(np.abs(sigma_i)) / np.sqrt(np.sum(sigma_i ** 2)))
return J * K / 0.798
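# Sketch (illustrative; the data list layout follows the indices read in fit):
# StetsonL = J * K / 0.798 couples the two-band correlation index J with the
# shape index K. Perfectly correlated residuals give sigma_i >= 0, hence L > 0.
def _stetson_l_demo():
    rng = np.random.default_rng(1)
    m, e = rng.normal(15.0, 0.1, 2000), np.full(2000, 0.1)
    data = [None, None, None, None, m, m + 0.5, None, e, e]
    return StetsonL(shared_data={}).fit(data)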
class Con(Base):
"""Index introduced for selection of variable starts from OGLE database.
To calculate Con, we counted the number of three consecutive measurements
that are out of 2sigma range, and normalized by N-2
Pavlos not happy
"""
def __init__(self, shared_data, consecutiveStar=3):
super().__init__(shared_data)
self.Data = ['magnitude']
self.consecutiveStar = consecutiveStar
def fit(self, data):
magnitude = data[0]
N = len(magnitude)
if N < self.consecutiveStar:
return 0
sigma = np.std(magnitude)
m = np.mean(magnitude)
count = 0
for i in range(N - self.consecutiveStar + 1):
flag = 0
for j in range(self.consecutiveStar):
if (magnitude[i + j] > m + 2 * sigma
or magnitude[i + j] < m - 2 * sigma):
flag = 1
else:
flag = 0
break
if flag:
count = count + 1
return count * 1.0 / (N - self.consecutiveStar + 1)
class Color(Base):
"""Average color for each MACHO lightcurve
mean(B1) - mean(B2)
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'magnitude2']
def fit(self, data):
magnitude = data[0]
magnitude2 = data[3]
return np.mean(magnitude) - np.mean(magnitude2)
class Beyond1Std(Base):
"""Percentage of points beyond one st. dev. from the weighted
(by photometric errors) mean
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'error']
def fit(self, data):
magnitude = data[0]
error = data[2]
n = len(magnitude)
weighted_mean = np.average(magnitude, weights=1 / error ** 2)
# Standard deviation with respect to the weighted mean
var = sum((magnitude - weighted_mean) ** 2)
std = np.sqrt((1.0 / (n - 1)) * var)
count = np.sum(np.logical_or(magnitude > weighted_mean + std,
magnitude < weighted_mean - std))
return float(count) / n
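# Quick check (illustrative; shared_data={} assumed): for Gaussian data about
# 31.7% of points lie beyond one standard deviation of the error-weighted mean.
def _beyond1std_demo():
    rng = np.random.default_rng(2)
    mags, errs = rng.normal(15.0, 0.1, 10000), np.full(10000, 0.1)
    return Beyond1Std(shared_data={}).fit([mags, None, errs])  # ~0.317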
class SmallKurtosis(Base):
"""Small sample kurtosis of the magnitudes.
See http://www.xycoon.com/peakedness_small_sample_test_1.htm
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
n = len(magnitude)
mean = np.mean(magnitude)
std = np.std(magnitude)
S = sum(((magnitude - mean) / std) ** 4)
c1 = float(n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))
c2 = float(3 * (n - 1) ** 2) / ((n - 2) * (n - 3))
return c1 * S - c2
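# Illustrative check: the bias-corrected small-sample kurtosis is ~0 for
# Gaussian data, up to sampling noise and the population std used above.
def _small_kurtosis_demo():
    rng = np.random.default_rng(3)
    return SmallKurtosis(shared_data={}).fit([rng.normal(0.0, 1.0, 500)])  # ~0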
class Std(Base):
"""Standard deviation of the magnitudes"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
return np.std(magnitude)
class Skew(Base):
"""Skewness of the magnitudes"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
return stats.skew(magnitude)
class StetsonJ(Base):
"""Stetson (1996) variability index, a robust standard deviation"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'error', 'magnitude2', 'error2']
#lc fields are [data, mjd, error, second_data, aligned_data, aligned_second_data, aligned_mjd]
def fit(self, data):
aligned_magnitude = data[4]
aligned_magnitude2 = data[5]
aligned_error = data[7]
aligned_error2 = data[8]
N = len(aligned_magnitude)
mean_mag = (np.sum(aligned_magnitude/(aligned_error*aligned_error)) /
np.sum(1.0 / (aligned_error * aligned_error)))
mean_mag2 = (np.sum(aligned_magnitude2 / (aligned_error2*aligned_error2)) /
np.sum(1.0 / (aligned_error2 * aligned_error2)))
sigmap = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude[:N] - mean_mag) /
aligned_error)
sigmaq = (np.sqrt(N * 1.0 / (N - 1)) *
(aligned_magnitude2[:N] - mean_mag2) /
aligned_error2)
sigma_i = sigmap * sigmaq
J = (1.0 / len(sigma_i) * np.sum(np.sign(sigma_i) *
np.sqrt(np.abs(sigma_i))))
return J
class MaxSlope(Base):
"""
Examining successive (time-sorted) magnitudes, the maximal first difference
(value of delta magnitude over delta time)
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
def fit(self, data):
magnitude = data[0]
time = data[1]
        slope = np.abs(magnitude[1:] - magnitude[:-1]) / (time[1:] - time[:-1])
        return np.max(slope)
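# Sketch (illustrative): MaxSlope is the largest |delta mag / delta t| between
# consecutive samples, so a single sharp jump dominates the statistic.
def _max_slope_demo():
    time = np.array([0.0, 1.0, 1.1, 2.0])
    mags = np.array([15.0, 15.0, 16.0, 16.0])
    return MaxSlope(shared_data={}).fit([mags, time])  # 1.0 / 0.1 = 10.0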
class MedianAbsDev(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
median = np.median(magnitude)
devs = (abs(magnitude - median))
return np.median(devs)
class MedianBRP(Base):
"""Median buffer range percentage
Fraction (<= 1) of photometric points within amplitude/10
of the median magnitude
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
median = np.median(magnitude)
amplitude = (np.max(magnitude) - np.min(magnitude)) / 10
n = len(magnitude)
count = np.sum(np.logical_and(magnitude < median + amplitude,
magnitude > median - amplitude))
return float(count) / n
class PairSlopeTrend(Base):
"""
Considering the last 30 (time-sorted) measurements of source magnitude,
the fraction of increasing first differences minus the fraction of
decreasing first differences.
"""
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
data_last = magnitude[-30:]
return (float(len(np.where(np.diff(data_last) > 0)[0]) -
len(np.where(np.diff(data_last) <= 0)[0])) / 30)
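# Illustrative check: a monotonically rising tail gives 29 positive and 0
# non-positive first differences over the last 30 points, i.e. (29 - 0) / 30.
def _pair_slope_trend_demo():
    return PairSlopeTrend(shared_data={}).fit([np.linspace(15.0, 16.0, 40)])  # ~0.967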
class FluxPercentileRatioMid20(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_60_index = math.ceil(0.60 * lc_length)
F_40_index = math.ceil(0.40 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_40_60 = sorted_data[F_60_index] - sorted_data[F_40_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid20 = F_40_60 / F_5_95
return F_mid20
class FluxPercentileRatioMid35(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_325_index = math.ceil(0.325 * lc_length)
F_675_index = math.ceil(0.675 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_325_675 = sorted_data[F_675_index] - sorted_data[F_325_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid35 = F_325_675 / F_5_95
return F_mid35
class FluxPercentileRatioMid50(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_25_index = math.ceil(0.25 * lc_length)
F_75_index = math.ceil(0.75 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_25_75 = sorted_data[F_75_index] - sorted_data[F_25_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid50 = F_25_75 / F_5_95
return F_mid50
class FluxPercentileRatioMid65(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_175_index = math.ceil(0.175 * lc_length)
F_825_index = math.ceil(0.825 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_175_825 = sorted_data[F_825_index] - sorted_data[F_175_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid65 = F_175_825 / F_5_95
return F_mid65
class FluxPercentileRatioMid80(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_10_index = math.ceil(0.10 * lc_length)
F_90_index = math.ceil(0.90 * lc_length)
F_5_index = math.ceil(0.05 * lc_length)
        F_95_index = math.ceil(0.95 * lc_length)  # ceil, matching the other FluxPercentileRatio features
F_10_90 = sorted_data[F_90_index] - sorted_data[F_10_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid80 = F_10_90 / F_5_95
return F_mid80
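# Common pattern (illustrative): every FluxPercentileRatioMidXX divides an
# inner percentile range by the 5-95 range, so a uniform magnitude
# distribution gives roughly XX/90, e.g. 50/90 ~ 0.556 for Mid50.
def _flux_percentile_ratio_demo():
    mags = np.linspace(0.0, 1.0, 1001)
    return FluxPercentileRatioMid50(shared_data={}).fit([mags])  # ~0.556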
class PercentDifferenceFluxPercentile(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
median_data = np.median(magnitude)
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data)
F_5_index = math.ceil(0.05 * lc_length)
F_95_index = math.ceil(0.95 * lc_length)
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
percent_difference = F_5_95 / median_data
return percent_difference
class PercentAmplitude(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
median_data = np.median(magnitude)
distance_median = np.abs(magnitude - median_data)
max_distance = np.max(distance_median)
percent_amplitude = max_distance / median_data
return percent_amplitude
class LinearTrend(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
def fit(self, data):
magnitude = data[0]
time = data[1]
regression_slope = stats.linregress(time, magnitude)[0]
return regression_slope
class Eta_color(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'magnitude2']
def fit(self, data):
aligned_magnitude = data[4]
aligned_magnitude2 = data[5]
aligned_time = data[6]
B_Rdata = aligned_magnitude - aligned_magnitude2
w = 1.0 / np.power(aligned_time[1:] - aligned_time[:-1], 2)
sigma2 = np.var(B_Rdata)
S1 = sum(w * (B_Rdata[1:] - B_Rdata[:-1]) ** 2)
S2 = sum(w)
eta_B_R = (1 / sigma2) * (S1 / S2)
return eta_B_R
class Eta_e(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
def fit(self, data):
magnitude = data[0]
time = data[1]
w = 1.0 / np.power(np.subtract(time[1:], time[:-1]), 2)
sigma2 = np.var(magnitude)
S1 = np.sum(w * np.power(np.subtract(magnitude[1:], magnitude[:-1]), 2))
S2 = np.sum(w)
eta_e = (1 / sigma2) * (S1 / S2)
return eta_e
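# Sketch (illustrative): Eta_e is a time-weighted von Neumann ratio; white
# noise on a unit time grid gives ~2, while a smooth trend drives it toward 0.
def _eta_e_demo():
    rng = np.random.default_rng(4)
    return Eta_e(shared_data={}).fit([rng.normal(0, 1, 1000), np.arange(1000.0)])  # ~2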
class Mean(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
B_mean = np.mean(magnitude)
return B_mean
class Q31(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
percentiles = np.percentile(magnitude, (25, 75))
return percentiles[1] - percentiles[0]
class Q31_color(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time', 'magnitude2']
def fit(self, data):
aligned_magnitude = data[4]
aligned_magnitude2 = data[5]
N = len(aligned_magnitude)
b_r = aligned_magnitude[:N] - aligned_magnitude2[:N]
percentiles = np.percentile(b_r, (25, 75))
return percentiles[1] - percentiles[0]
class AndersonDarling(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude']
def fit(self, data):
magnitude = data[0]
ander = stats.anderson(magnitude)[0]
return 1 / (1.0 + np.exp(-10 * (ander - 0.3)))
class Psi_CS_v2(Base):
def __init__(self, shared_data):
super().__init__(shared_data)
self.Data = ['magnitude', 'time']
def fit(self, data):
try:
magnitude = data[0]
time = data[1]
new_time_v2 = self.shared_data['new_time_v2']
folded_data = magnitude[np.argsort(new_time_v2)]
sigma = np.std(folded_data)
N = len(folded_data)
m = np.mean(folded_data)
s = np.cumsum(folded_data - m) * 1.0 / (N * sigma)
            R = np.max(s) - np.min(s)
            return R
        except KeyError:
            # 'new_time_v2' comes from the period features (PeriodLS_v2);
            # fall back to NaN if they were not computed first.
            return np.nan
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 17 14:45:00 2017
@author: shenda
class order: ['A', 'N', 'O', '~']
"""
import numpy as np
from sklearn import ensemble
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from OptF import OptF
from copy import deepcopy
import xgboost as xgb
import ReadData
import random
from collections import Counter
class MyXGB(object):
"""
    bottom basic classifier, a wrapper for xgboost
the proba order is ['N', 'A', 'O', '~']
"""
def __init__(self, n_estimators=5000,
max_depth=10,
subsample=0.85,
colsample_bytree=0.85,
min_child_weight=4,
num_round = 500):
self.param = {'learning_rate':0.1, 'eta':0.1, 'silent':1,
'objective':'multi:softprob', 'num_class': 4}
self.bst = None
self.num_round = num_round
self.pred = None
my_seed = random.randint(0, 1000)
self.param['n_estimators'] = n_estimators
self.param['max_depth'] = max_depth
self.param['subsample'] = subsample
self.param['colsample_bytree'] = colsample_bytree
self.param['min_child_weight'] = min_child_weight
# self.param['random_state'] = my_seed
self.param['seed'] = my_seed
self.param['n_jobs'] = -1
print(self.param.items())
print(self.num_round)
def fit(self, train_data, train_label):
train_label = ReadData.Label2Index(train_label)
dtrain = xgb.DMatrix(train_data, label=train_label)
self.bst = xgb.train(self.param, dtrain, num_boost_round=self.num_round)
def predict_prob(self, test_data):
dtest = xgb.DMatrix(test_data)
self.pred = self.bst.predict(dtest)
return self.pred
def predict(self, test_data):
pred_prob = self.predict_prob(test_data)
pred_num = np.argmax(pred_prob, axis=1)
pred = ReadData.Index2Label(pred_num)
return pred
def get_importance(self):
return self.bst.get_score(importance_type='gain')
def plot_importance(self):
xgb.plot_importance(self.bst)
class MyLR(object):
"""
    Top level classifier, a wrapper for Logistic Regression
"""
def __init__(self):
self.clf = LogisticRegression()
def fit(self,
train_qrs_data, train_qrs_label):
        train_data = np.array(train_qrs_data)
train_label = train_qrs_label
self.clf.fit(train_data, train_label)
def predict(self, test_qrs_data):
test_data = np.array(test_qrs_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_qrs_data):
test_qrs_data = np.array(test_qrs_data)
if test_qrs_data.ndim == 1:
test_qrs_data = np.expand_dims(np.array(test_qrs_data), axis=0)
test_data = np.array(test_qrs_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_qrs_data)
return self.clf.predict_proba(test_data)
class MyKNN(object):
"""
    bottom basic classifier (k-nearest neighbours)
    supports unequal-length vectors by zero-padding the shorter one
"""
def __init__(self, n_neighbors=3):
self.n_neighbors = n_neighbors
self.train_data = None
self.train_label = None
self.labels = ['N', 'A', 'O', '~']
# self.thresh = [0.5, 0.3, 0.2, ]
def fit(self, train_data, train_label):
self.train_data = np.array(train_data)
self.train_label = np.array(train_label)
def dist(self, vec1, vec2):
res = 0.0
if len(vec1) <= len(vec2):
vec1 = np.r_[vec1, np.zeros(len(vec2)-len(vec1))]
else:
vec2 = np.r_[vec2, np.zeros(len(vec1)-len(vec2))]
dist_num = np.linalg.norm(vec1 - vec2)
return dist_num
def predict_prob(self, test_data):
test_data = np.array(test_data)
pred = []
for i in test_data:
tmp_dist_list = []
tmp_pred = []
for j in self.train_data:
tmp_dist_list.append(self.dist(i, j))
pred_n_neighbors = self.train_label[np.argsort(tmp_dist_list)[:self.n_neighbors]]
pred_counter = Counter(pred_n_neighbors)
# print(pred_counter)
for ii in self.labels:
tmp_pred.append(pred_counter[ii])
pred.append(tmp_pred)
return pred
def predict(self, test_data):
pred = self.predict_prob(test_data)
pred_label = []
for i in pred:
pred_label.append(self.labels[np.argsort(i)[-1]])
return pred_label
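# Illustrative check of the unequal-length metric: the shorter vector is
# zero-padded on the right before the Euclidean norm, so the trailing sample
# of the longer vector contributes in full.
def _knn_dist_demo():
    return MyKNN().dist(np.array([1.0, 2.0]), np.array([1.0, 2.0, 2.0]))  # 2.0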
class MyGBDT(object):
"""
    bottom basic classifier, a wrapper for GradientBoostingClassifier
"""
def __init__(self):
self.clf = ensemble.GradientBoostingClassifier()
def fit(self,
train_data, train_label):
        train_data = np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyExtraTrees(object):
"""
    bottom basic classifier, a wrapper for ExtraTreesClassifier
"""
def __init__(self):
self.clf = ensemble.ExtraTreesClassifier(n_estimators=100)
def fit(self,
train_data, train_label):
        train_data = np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyAdaBoost(object):
"""
    bottom basic classifier, a wrapper for AdaBoostClassifier
"""
def __init__(self):
self.clf = ensemble.AdaBoostClassifier(n_estimators=100, learning_rate=0.1)
def fit(self,
train_data, train_label):
        train_data = np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyRF(object):
"""
    bottom basic classifier, a wrapper for Random Forest
"""
def __init__(self):
self.clf = ensemble.RandomForestClassifier(
n_estimators=1000, n_jobs=-1)
def fit(self,
train_data, train_label):
        train_data = np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyOptF(object):
"""
    bottom basic classifier, a wrapper for Opt F-score
"""
def __init__(self, alpha=0.5, epochs=10):
self.clf = OptF(alpha, epochs)
def fit(self, train_data, train_label):
self.clf.fit(train_data, deepcopy(train_label))
def predict(self, test_data):
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
return self.clf.predict_prob(test_data)
class RF(object):
"""
    Top level classifier, a wrapper for Random Forest.
    long_feature and qrs_feature are now used separately, so this class is
    no longer used. Deprecated.
"""
def __init__(self):
self.clf = ensemble.RandomForestClassifier()
def fit(self,
train_long_data, train_long_label,
train_qrs_data, train_qrs_label):
train_data = np.c_[np.array(train_long_data), np.array(train_qrs_data)]
train_label = train_long_label
self.clf.fit(train_data, train_label)
def predict(self, test_long_data, test_qrs_data):
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_long_data, test_qrs_data):
test_long_data = np.array(test_long_data)
test_qrs_data = np.array(test_qrs_data)
if test_long_data.ndim == 1 or test_qrs_data.ndim == 1:
test_long_data = np.expand_dims(np.array(test_long_data), axis=0)
test_qrs_data = np.expand_dims(np.array(test_qrs_data), axis=0)
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
else:
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
return list(list(self.clf.predict_proba(test_data))[0])
class RFSimp(object):
"""
    Top level classifier, a wrapper for Random Forest
use long/qrs feature
deprecated
"""
def __init__(self):
self.clf = ensemble.RandomForestClassifier()
def fit(self,
train_qrs_data, train_qrs_label):
        train_data = np.array(train_qrs_data)
train_label = train_qrs_label
self.clf.fit(train_data, train_label)
def predict(self, test_qrs_data):
test_data = np.array(test_qrs_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_qrs_data):
test_qrs_data = np.array(test_qrs_data)
if test_qrs_data.ndim == 1:
test_qrs_data = np.expand_dims(np.array(test_qrs_data), axis=0)
test_data = np.array(test_qrs_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_qrs_data)
return self.clf.predict_proba(test_data)
class LR(object):
"""
    Top level classifier, a wrapper for Logistic Regression
deprecated
"""
def __init__(self):
self.clf = LogisticRegression()
def fit(self,
train_long_data, train_long_label,
train_qrs_data, train_qrs_label):
train_data = np.c_[np.array(train_long_data), np.array(train_qrs_data)]
train_label = train_long_label
self.clf.fit(train_data, train_label)
def predict(self, test_long_data, test_qrs_data):
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_long_data, test_qrs_data):
test_long_data = np.array(test_long_data)
test_qrs_data = np.array(test_qrs_data)
if test_long_data.ndim == 1 or test_qrs_data.ndim == 1:
test_long_data = np.expand_dims(np.array(test_long_data), axis=0)
test_qrs_data = np.expand_dims(np.array(test_qrs_data), axis=0)
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
else:
            test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
        # completion mirrors RF.predict_prob above
        return list(list(self.clf.predict_proba(test_data))[0])
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Normal, Constant
from paddlex.ppdet.core.workspace import register
from paddlex.ppdet.modeling import bbox_utils
from paddlex.ppdet.modeling.proposal_generator.target_layer import RBoxAssigner
import numpy as np
class S2ANetAnchorGenerator(nn.Layer):
"""
AnchorGenerator by paddle
"""
def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
super(S2ANetAnchorGenerator, self).__init__()
self.base_size = base_size
self.scales = paddle.to_tensor(scales)
self.ratios = paddle.to_tensor(ratios)
self.scale_major = scale_major
self.ctr = ctr
self.base_anchors = self.gen_base_anchors()
@property
def num_base_anchors(self):
return self.base_anchors.shape[0]
def gen_base_anchors(self):
w = self.base_size
h = self.base_size
if self.ctr is None:
x_ctr = 0.5 * (w - 1)
y_ctr = 0.5 * (h - 1)
else:
x_ctr, y_ctr = self.ctr
h_ratios = paddle.sqrt(self.ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:] * self.scales[:]).reshape([-1])
hs = (h * h_ratios[:] * self.scales[:]).reshape([-1])
else:
ws = (w * self.scales[:] * w_ratios[:]).reshape([-1])
hs = (h * self.scales[:] * h_ratios[:]).reshape([-1])
base_anchors = paddle.stack(
[
x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
],
axis=-1)
base_anchors = paddle.round(base_anchors)
return base_anchors
def _meshgrid(self, x, y, row_major=True):
yy, xx = paddle.meshgrid(x, y)
yy = yy.reshape([-1])
xx = xx.reshape([-1])
if row_major:
return xx, yy
else:
return yy, xx
def forward(self, featmap_size, stride=16):
# featmap_size*stride project it to original area
base_anchors = self.base_anchors
feat_h = featmap_size[0]
feat_w = featmap_size[1]
shift_x = paddle.arange(0, feat_w, 1, 'int32') * stride
shift_y = paddle.arange(0, feat_h, 1, 'int32') * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = paddle.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)
all_anchors = base_anchors[:, :] + shifts[:, :]
all_anchors = all_anchors.reshape([feat_h * feat_w, 4])
return all_anchors
def valid_flags(self, featmap_size, valid_size):
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = paddle.zeros([feat_w], dtype='uint8')
valid_y = paddle.zeros([feat_h], dtype='uint8')
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
valid = valid[:, None].expand(
            [valid.shape[0], self.num_base_anchors]).reshape([-1])  # paddle tensors use .shape, not torch-style .size()
return valid
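# Illustrative usage (assumed shapes, not from the original repo): a single
# scale and ratio yields one axis-aligned anchor per feature-map cell.
def _anchor_generator_demo():
    gen = S2ANetAnchorGenerator(base_size=8, scales=[4], ratios=[1.0])
    anchors = gen(featmap_size=(4, 4), stride=8)
    return anchors.shape  # [16, 4]: one (x1, y1, x2, y2) box per cell of the 4x4 map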
class AlignConv(nn.Layer):
def __init__(self, in_channels, out_channels, kernel_size=3, groups=1):
super(AlignConv, self).__init__()
self.kernel_size = kernel_size
self.align_conv = paddle.vision.ops.DeformConv2D(
in_channels,
out_channels,
kernel_size=self.kernel_size,
padding=(self.kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
bias_attr=None)
@paddle.no_grad()
def get_offset(self, anchors, featmap_size, stride):
"""
Args:
anchors: [M,5] xc,yc,w,h,angle
featmap_size: (feat_h, feat_w)
stride: 8
Returns:
"""
anchors = paddle.reshape(anchors, [-1, 5]) # (NA,5)
dtype = anchors.dtype
feat_h, feat_w = featmap_size
pad = (self.kernel_size - 1) // 2
idx = paddle.arange(-pad, pad + 1, dtype=dtype)
yy, xx = paddle.meshgrid(idx, idx)
xx = paddle.reshape(xx, [-1])
yy = paddle.reshape(yy, [-1])
# get sampling locations of default conv
xc = paddle.arange(0, feat_w, dtype=dtype)
yc = paddle.arange(0, feat_h, dtype=dtype)
yc, xc = paddle.meshgrid(yc, xc)
xc = paddle.reshape(xc, [-1, 1])
yc = paddle.reshape(yc, [-1, 1])
x_conv = xc + xx
y_conv = yc + yy
# get sampling locations of anchors
# x_ctr, y_ctr, w, h, a = np.unbind(anchors, dim=1)
x_ctr = anchors[:, 0]
y_ctr = anchors[:, 1]
w = anchors[:, 2]
h = anchors[:, 3]
a = anchors[:, 4]
x_ctr = paddle.reshape(x_ctr, [x_ctr.shape[0], 1])
y_ctr = paddle.reshape(y_ctr, [y_ctr.shape[0], 1])
w = paddle.reshape(w, [w.shape[0], 1])
h = paddle.reshape(h, [h.shape[0], 1])
a = paddle.reshape(a, [a.shape[0], 1])
x_ctr = x_ctr / stride
y_ctr = y_ctr / stride
w_s = w / stride
h_s = h / stride
cos, sin = paddle.cos(a), paddle.sin(a)
dw, dh = w_s / self.kernel_size, h_s / self.kernel_size
x, y = dw * xx, dh * yy
xr = cos * x - sin * y
yr = sin * x + cos * y
x_anchor, y_anchor = xr + x_ctr, yr + y_ctr
# get offset filed
offset_x = x_anchor - x_conv
offset_y = y_anchor - y_conv
# x, y in anchors is opposite in image coordinates,
# so we stack them with y, x other than x, y
offset = paddle.stack([offset_y, offset_x], axis=-1)
# NA,ks*ks*2
# [NA, ks, ks, 2] --> [NA, ks*ks*2]
offset = paddle.reshape(offset, [offset.shape[0], -1])
# [NA, ks*ks*2] --> [ks*ks*2, NA]
offset = paddle.transpose(offset, [1, 0])
        # [ks*ks*2, NA] --> [1, ks*ks*2, H, W]
offset = paddle.reshape(offset, [1, -1, feat_h, feat_w])
return offset
def forward(self, x, refine_anchors, stride):
featmap_size = (x.shape[2], x.shape[3])
offset = self.get_offset(refine_anchors, featmap_size, stride)
x = F.relu(self.align_conv(x, offset))
return x
@register
class S2ANetHead(nn.Layer):
"""
S2Anet head
Args:
stacked_convs (int): number of stacked_convs
feat_in (int): input channels of feat
feat_out (int): output channels of feat
num_classes (int): num_classes
anchor_strides (list): stride of anchors
anchor_scales (list): scale of anchors
anchor_ratios (list): ratios of anchors
target_means (list): target_means
target_stds (list): target_stds
align_conv_type (str): align_conv_type ['Conv', 'AlignConv']
align_conv_size (int): kernel size of align_conv
use_sigmoid_cls (bool): use sigmoid_cls or not
reg_loss_weight (list): loss weight for regression
"""
__shared__ = ['num_classes']
__inject__ = ['anchor_assign']
def __init__(self,
stacked_convs=2,
feat_in=256,
feat_out=256,
num_classes=15,
anchor_strides=[8, 16, 32, 64, 128],
anchor_scales=[4],
anchor_ratios=[1.0],
target_means=0.0,
target_stds=1.0,
align_conv_type='AlignConv',
align_conv_size=3,
use_sigmoid_cls=True,
anchor_assign=RBoxAssigner().__dict__,
reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.0],
cls_loss_weight=[1.0, 1.0]):
super(S2ANetHead, self).__init__()
self.stacked_convs = stacked_convs
self.feat_in = feat_in
self.feat_out = feat_out
self.anchor_list = None
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(anchor_strides)
self.target_means = target_means
self.target_stds = target_stds
assert align_conv_type in ['AlignConv', 'Conv', 'DCN']
self.align_conv_type = align_conv_type
self.align_conv_size = align_conv_size
self.use_sigmoid_cls = use_sigmoid_cls
self.cls_out_channels = num_classes if self.use_sigmoid_cls else 1
self.sampling = False
self.anchor_assign = anchor_assign
self.reg_loss_weight = reg_loss_weight
self.cls_loss_weight = cls_loss_weight
self.s2anet_head_out = None
# anchor
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
S2ANetAnchorGenerator(anchor_base, anchor_scales,
anchor_ratios))
self.anchor_generators = paddle.nn.LayerList(self.anchor_generators)
self.add_sublayer('s2anet_anchor_gen', self.anchor_generators)
self.fam_cls_convs = nn.Sequential()
self.fam_reg_convs = nn.Sequential()
for i in range(self.stacked_convs):
chan_in = self.feat_in if i == 0 else self.feat_out
self.fam_cls_convs.add_sublayer(
'fam_cls_conv_{}'.format(i),
nn.Conv2D(
in_channels=chan_in,
out_channels=self.feat_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0))))
self.fam_cls_convs.add_sublayer('fam_cls_conv_{}_act'.format(i),
nn.ReLU())
self.fam_reg_convs.add_sublayer(
'fam_reg_conv_{}'.format(i),
nn.Conv2D(
in_channels=chan_in,
out_channels=self.feat_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0))))
self.fam_reg_convs.add_sublayer('fam_reg_conv_{}_act'.format(i),
nn.ReLU())
self.fam_reg = nn.Conv2D(
self.feat_out,
5,
1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0)))
prior_prob = 0.01
        bias_init = float(-np.log((1 - prior_prob) / prior_prob))
#!/usr/bin/env pytest
import numpy as np
import pytest
import siglib as sl
@pytest.mark.parametrize(
"x,frame_length,frame_step,pad,pad_value,expected",
(
        (np.arange(10), 5, 5, True, 0j, np.arange(10, dtype=np.complex128).reshape(2, 5)),
        (np.arange(10), 5, 5, False, 0j, np.arange(10, dtype=np.complex128).reshape(2, 5)),
),
)
def test_frame(x, frame_length, frame_step, pad, pad_value, expected):
result = sl.frame(x, frame_length, frame_step, pad=pad, pad_value=pad_value)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
"x,ntaps,expected",
(
(np.zeros(10), 5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
(np.arange(10), 5, [4, 4, 4, 4, 4, 5, 6, 7, 8, 9]),
),
)
def test_closing(x, ntaps, expected):
result = sl.closing(x, ntaps)
    expected = np.array(expected, dtype=np.complex128)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
"x,ntaps,expected",
(
(np.zeros(10), 5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
(np.arange(10), 5, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
),
)
def test_opening(x, ntaps, expected):
result = sl.opening(x, ntaps)
    expected = np.array(expected, dtype=np.complex128)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
"x,idx,ntaps,expected",
((np.arange(10 ** 2), np.array([45.567]), 5, [44.96565413]),),
)
def test_resample(x, idx, ntaps, expected):
result = sl.resample(x, idx, ntaps)
np.testing.assert_allclose(result, expected, rtol=1e-9)
@pytest.mark.parametrize(
"x,delay,pad,pad_value,expected",
(
(
[1 + 3j, 4 + 2j, 5 + 6j, 1 + 0j],
1,
True,
1 + 0j,
[10 - 10j, 32 + 14j, 5.0 - 6j, 1.0 + 0j],
),
(
[1 + 3j, 4 + 2j, 5 + 6j, 1 + 0j],
1,
False,
1 + 0j,
[10 - 10j, 32 + 14j, 5.0 - 6j],
),
),
)
def test_dcm(x, delay, pad, pad_value, expected):
    x = np.array(x)
    # call signature assumed from the parametrize fields, mirroring test_frame
    result = sl.dcm(x, delay, pad=pad, pad_value=pad_value)
    np.testing.assert_allclose(result, expected)
"""
Simplified transforms.py
(c) Delirium Digital Limited 2020
Written by <NAME>
"""
import cv2
import numpy as np
import math
# from scipy.spatial.transform import Rotation as R
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
def resize_image(data_numpy, x_scale, y_scale):
return cv2.resize(data_numpy, None, fx=x_scale, fy=y_scale, interpolation=cv2.INTER_LINEAR)
def rotate_image(image, angle):
"""
Rotate image around center by angle (degrees)
"""
(h, w) = image.shape[:2]
center = (w / 2, h / 2)
scale = 1.0
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def scale_keypoints(keypoints, x_scale=1, y_scale=1, z_scale=1):
"""
Scale keypoints based on x and y scales (useful for image crop and resize)
"""
scaled_keypoints = []
scale_matrix = np.array([x_scale, y_scale, z_scale])
for k in keypoints:
scaled_keypoints.append(k * scale_matrix)
return np.array(scaled_keypoints)
def center_keypoints(keypoints, center_x, center_y):
"""
Center keypoints
TODO: Is this needed?
"""
centered_keypoints = []
for keypoint in keypoints:
kp = [center_x + keypoint[0], center_y - keypoint[1], keypoint[2]]
centered_keypoints.append(kp)
return np.array(centered_keypoints)
def offset_keypoints(keypoints, x_offset=0, y_offset=0, z_offset=0):
keypoints[:, 0] += x_offset
keypoints[:, 1] += y_offset
keypoints[:, 2] += z_offset
return keypoints
def flip_keypoints(keypoints):
    """
    Flip keypoints horizontally (x = -x), in place.
    """
    keypoints[:, 0] = -keypoints[:, 0]
    return keypoints
def rotate(x, y, xo, yo, theta):
"""rotate x,y around xo,yo by theta (rad)"""
xr = math.cos(theta) * (x - xo) - math.sin(theta) * (y - yo) + xo
yr = math.sin(theta) * (x - xo) + math.cos(theta) * (y - yo) + yo
return [xr, yr]
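# Quick check (illustrative): rotating (1, 0) by pi/2 rad about the origin
# lands on (0, 1), up to floating-point error.
def _rotate_demo():
    return rotate(1.0, 0.0, 0.0, 0.0, math.pi / 2)  # ~[0.0, 1.0]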
def rotate_keypoints(keypoints, origin, angle):
"""
Rotate keypoints
Doesn't impact keypoints_vis as by this point the image is square (TODO: Validate this is true!)
:param keypoints:
:param angle: Angle in degrees (to align with rotate_image and opencv angles)
:param origin: point to rotate around
:return keypoints:
"""
rads = -np.deg2rad(angle)
rotated_keypoints = []
for keypoint in keypoints:
kp_r = rotate(keypoint[0], keypoint[1], origin[0], origin[1], rads)
rotated_keypoints.append(kp_r)
return rotated_keypoints
def flip_back(output_flipped, matched_parts):
"""
output_flipped: numpy.ndarray(batch_size, num_keypoints, height, width)
"""
assert output_flipped.ndim == 4, \
'output_flipped should be [batch_size, num_keypoints, height, width]'
output_flipped = output_flipped[:, :, :, ::-1]
for pair in matched_parts:
tmp = output_flipped[:, pair[0], :, :].copy()
output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]
output_flipped[:, pair[1], :, :] = tmp
return output_flipped
# From https://github.com/nsantavas/Attention-A-Lightweight-2D-Hand-Pose-Estimation-Approach/blob/2136e93586f0d0eb40518f47d2b9f78c6220ea33/model_train.py#L62
def augment_image(image):
"""
    Augment an image with random JPEG quality, brightness, saturation, and contrast;
    the result is cast to float32 (no normalization is applied here)
"""
image = tf.cast(image, tf.uint8)
image = tf.image.random_jpeg_quality(image, min_jpeg_quality=70, max_jpeg_quality=100)
image = tf.cast(image, tf.float32)
image = tf.image.random_brightness(image, max_delta=25 / 255)
image = tf.image.random_saturation(image, lower=0.3, upper=1.7)
image = tf.image.random_contrast(image, lower=0.3, upper=1.7)
image = tf.cast(image, tf.float32)
return image
# ------------------------------------------------------------------------------
# Code below from https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/utils/transforms.py
#
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
def fliplr_joints(joints, joints_vis, width, matched_parts):
"""
flip coords
"""
# Flip horizontal
joints[:, 0] = width - joints[:, 0] - 1
# Change left-right parts
for pair in matched_parts:
joints[pair[0], :], joints[pair[1], :] = \
joints[pair[1], :], joints[pair[0], :].copy()
joints_vis[pair[0]], joints_vis[pair[1]] = \
joints_vis[pair[1]], joints_vis[pair[0]].copy()
_joints_vis = np.transpose(np.array([joints_vis, joints_vis, joints_vis]), axes=[1, 0])
return joints*_joints_vis, joints_vis
def get_affine_transform(center,
scale,
rot,
output_size,
                         shift=np.array([0, 0], dtype=np.float32),
"""
Project
-------
"""
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import scipy.sparse as sp # type: ignore
from anndata import AnnData # type: ignore
import metacells.parameters as pr
import metacells.utilities as ut
__all__ = [
"renormalize_query_by_atlas",
"project_query_onto_atlas",
"find_systematic_genes",
"project_atlas_to_query",
"find_biased_genes",
"compute_query_projection",
]
@ut.logged()
@ut.timed_call()
def renormalize_query_by_atlas( # pylint: disable=too-many-statements,too-many-branches
what: str = "__x__",
*,
adata: AnnData,
qdata: AnnData,
var_annotations: Dict[str, Any],
layers: Dict[str, Any],
varp_annotations: Dict[str, Any],
) -> Optional[AnnData]:
"""
Add an ``ATLASNORM`` pseudo-gene to query metacells data to compensate for the query having filtered out many genes.
    This renormalizes the gene fractions in the query to fit the atlas in case the query has aggressively filtered
    out a significant number of genes.
**Input**
Annotated query ``qdata`` and atlas ``adata``, where the observations are cells and the variables are genes, where
``X`` is a per-variable-per-observation matrix or the name of a per-variable-per-observation annotation containing
such a matrix.
**Returns**
None if no normalization is needed (or possible). Otherwise, a copy of the query metacells data, with an additional
variable (gene) called ``ATLASNORM`` to the query data, such that the total number of UMIs for each query metacells
is as expected given the total number of UMIs of the genes common to the query and the atlas. This is skipped if the
    query and the atlas have exactly the same list of genes, or if the query already contains a high number of genes
missing from the atlas so that the total number of UMIs for the query metacells is already at least the expected
based on the common genes.
**Computation Parameters**
1. Computes how many UMIs should be added to each query metacell so that its (total UMIs / total common gene UMIs)
would be the same as the (total atlas UMIs / total atlas common UMIs). If this is zero (or negative), stop.
2. Add an ``ATLASNORM`` pseudo-gene to the query with the above amount of UMIs. For each per-variable (gene)
observation, add the value specified in ``var_annotations``, whose list of keys must cover the set of
per-variable annotations in the query data. For each per-observation-per-variable layer, add the value specified
in ``layers``, whose list of keys must cover the existing layers. For each per-variable-per-variable annotation,
add the value specified in ``varp_annotations``.
"""
for name in qdata.var.keys():
if "|" not in name and name not in var_annotations.keys():
raise RuntimeError(f"missing default value for variable annotation {name}")
for name in qdata.layers.keys():
if name not in layers.keys():
raise RuntimeError(f"missing default value for layer {name}")
for name in qdata.varp.keys():
if name not in varp_annotations.keys():
raise RuntimeError(f"missing default value for variable-variable {name}")
if list(qdata.var_names) == list(adata.var_names):
return None
query_genes_list = list(qdata.var_names)
atlas_genes_list = list(adata.var_names)
common_genes_list = list(sorted(set(qdata.var_names) & set(adata.var_names)))
query_gene_indices = np.array([query_genes_list.index(gene) for gene in common_genes_list])
atlas_gene_indices = np.array([atlas_genes_list.index(gene) for gene in common_genes_list])
common_qdata = ut.slice(qdata, name=".common", vars=query_gene_indices)
common_adata = ut.slice(adata, name=".common", vars=atlas_gene_indices)
assert list(common_qdata.var_names) == list(common_adata.var_names)
atlas_total_umis_per_metacell = ut.get_o_numpy(adata, what, sum=True)
atlas_common_umis_per_metacell = ut.get_o_numpy(common_adata, what, sum=True)
atlas_total_umis = np.sum(atlas_total_umis_per_metacell)
atlas_common_umis = np.sum(atlas_common_umis_per_metacell)
atlas_disjoint_umis_fraction = atlas_total_umis / atlas_common_umis - 1.0
ut.log_calc("atlas_total_umis", atlas_total_umis)
ut.log_calc("atlas_common_umis", atlas_common_umis)
ut.log_calc("atlas_disjoint_umis_fraction", atlas_disjoint_umis_fraction)
query_total_umis_per_metacell = ut.get_o_numpy(qdata, what, sum=True)
query_common_umis_per_metacell = ut.get_o_numpy(common_qdata, what, sum=True)
query_total_umis = np.sum(query_total_umis_per_metacell)
    query_common_umis = np.sum(query_common_umis_per_metacell)
import os
import sys
import yaml
import json
import time
import argparse
import numpy as np
import pickle
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
import src.utils
import src.dataset
import src.evaluation
if __name__ == "__main__":
# config file
parser = argparse.ArgumentParser(description="Test linear model.")
parser.add_argument('--config', type=str, default="config_linear_test.yaml")
args = parser.parse_args()
### END CONFIG ###
### PATHS & CONFIG
project_root = os.getcwd()
data_root = os.path.join(project_root, "datasets/maad")
exp_root = os.path.join(project_root, "experiments")
config_root = os.path.join(project_root, "config")
# config
config_path = os.path.join(config_root, args.config)
with open(config_path, "r") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
# data
data_path_test = os.path.join(data_root, config["dataset"]["set"])
# experiment path
method = config["model"]["type"]
run_name = method
date_time = src.utils.get_current_time()
run_name = date_time + "_" + run_name
exp_dir = os.path.join(exp_root, run_name)
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# create evaluation directory
eval_dir = "eval_" + src.utils.get_current_time()
eval_path = os.path.join(exp_dir, eval_dir)
if not os.path.isdir(eval_path):
os.makedirs(eval_path)
### DATA
dset_test = src.dataset.MAADDataset(data_path_test, obs_len=config["model"]["obs_len"],
adj_type="identity")
loader_test = DataLoader(dset_test,
batch_size=1,
shuffle=False,
num_workers=1)
### PREDICTION
print("\nPredicting...")
pred_start = time.time()
prediction_data = {}
step = 0
for cnt, batch in enumerate(loader_test):
step += 1
# get data
obs_traj, obs_traj_rel, frame_ids, seq_ids, labels, V_obs, A_obs = batch
# prepare data
        obs_traj = obs_traj.numpy()[0]  # batch size is 1 anyway, so drop the batch dim
frame_ids = frame_ids.numpy()[0].tolist()
seq_ids = seq_ids.numpy()[0]
labels = labels.numpy()[0]
# init linear trajectory
linear_traj = np.zeros(obs_traj.shape)
N = obs_traj.shape[0]
# model each agent individually
for i in range(N):
# get agent trajectory
agent_traj = obs_traj[i]
# trajectory features
start_pos = agent_traj[:, 0]
end_pos = agent_traj[:, -1]
n_ts = agent_traj.shape[1]
if method == "cvm":
# CVM
velocity = agent_traj[:, 1] - agent_traj[:, 0]
approx_agent_traj = np.zeros(agent_traj.shape) + velocity[:, np.newaxis]
approx_agent_traj[:, 0] = start_pos
approx_agent_traj = np.cumsum(approx_agent_traj, axis=1)
elif method == "lti":
# LTI
x_interp = np.linspace(start_pos[0], end_pos[0], n_ts)
y_interp = np.linspace(start_pos[1], end_pos[1], n_ts)
approx_agent_traj = np.zeros(agent_traj.shape)
approx_agent_traj[0] = x_interp
approx_agent_traj[1] = y_interp
else:
sys.exit("Unknown model type {}. Abort!".format(method))
# add to matrix
linear_traj[i] = approx_agent_traj
            if True:  # debug visualization of the original vs. approximated trajectory
plt.plot(agent_traj[0], agent_traj[1])
plt.plot(approx_agent_traj[0], approx_agent_traj[1])
# prepare data for dict export
obs_traj = np.transpose(obs_traj, (2, 0, 1))
linear_traj = np.transpose(linear_traj, (2, 0, 1))
frame_ids = np.transpose(frame_ids, (2, 0, 1))
        labels = np.transpose(labels, (2, 0, 1))
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
def format_lon_lat(ax, proj, lon_min, lon_max, lat_min, lat_max, title=''):
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
#from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
ax.set_extent([lon_min, lon_max,
lat_min, lat_max], proj)
# Add coastline
ax.coastlines('50m')
# Modify the title
ax.set_title(title)
# Set lon labels
    lon_labels = np.arange(lon_min, lon_max + 1, 10)
"""
Copyright 2018 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gensim.utils import simple_preprocess
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
class CustomParVec():
'''
Custom Paragraph Vector. Each paragraph(or sentence) is the sum of each of its word's Word2Vec vector
representation scaled by that word's tf-idf.
'''
def __init__(self, word_sentence_list, workers = 2, dimensions = 100, min_word_count = 2, context = 5, downsampling = 0, tfidf = True, pre_trained = None, supervised_docs = None):
'''
Args:
word_sentence_list (list[list[str]]) : List of lists of words that make up a paragraph(or sentence).
workers (int) : Number of threads to run in parallel (Default = 2).
dimensions (int) : Vector dimensionality (Default = 100).
min_word_count (int) : Minimum word count (Default = 2).
context (int) : Context window size (Default = 5).
downsampling (int) : Downsample setting for frequent words (Default = 0).
tfidf (bool) : Specify whether or not to use idf in scaling (Default = True).
pre_trained (Word2Vec) : Use a pre-trained Word2Vec model (Default = None).
supervised_docs (list[str]) : List of sentences from some "ground truth" (Default = None).
'''
self.dimensions = dimensions # Set the number of dimension
if not pre_trained:
self.word2vec_model = Word2Vec(word_sentence_list, workers=workers, \
size=self.dimensions, min_count = min_word_count, \
window = context, sample = downsampling)
self.word2vec_model.init_sims(replace=True) # used for memory efficiency
else:
self.word2vec_model = pre_trained
self.sentences = [' '.join(words) for words in word_sentence_list] # Keep a list of the full sentences themselves.
self.tf_idf_obj = TfidfVectorizer(use_idf = tfidf) # Create TfidfVectorizer object
self.tf_idf_obj.fit(self.sentences) # Transform and fit tf-idf to all sentences(could be paragraphs)
self.tf_idf = self.tf_idf_obj.transform(self.sentences)
self.word_index = self.tf_idf_obj.get_feature_names() # Keep track of words by index for lookups
if supervised_docs:
self.extra_tf_idf_obj = TfidfVectorizer(use_idf = tfidf) # Create TfidfVectorizer object
self.extra_tf_idf_obj.fit(supervised_docs) # Transform and fit tf-idf to all sentences(could be paragraphs)
self.extra_tf_idf = self.extra_tf_idf_obj.transform(supervised_docs)
self.extra_word_index = self.extra_tf_idf_obj.get_feature_names() # Keep track of words by index for lookups
else:
self.extra_tf_idf_obj = None
def learnVectors(self):
'''
Create a vector representation of every paragraph(or sentence) in the initial data provided.
Yields:
numpy.ndarray: Next numpy array representing the paragraph (or sentence).
'''
rows, cols = self.tf_idf.nonzero() # Get the rows and column indices of non zero tf-idf values
curr_line = 0
curr_vec = np.zeros(self.dimensions)
for row, col in zip(rows, cols):
if curr_line == row: # Check that the current word belongs to the same paragraph (or sentence).
try:
# Infer the vector of the current word by scaling the word's word2vec vector by its tf-idf value.
# Add that inferred vector to the current vector representing the current paragraph.
curr_vec += (self.word2vec_model[(self.word_index[col])] * self.tf_idf[row, col])
                except KeyError:  # word missing from the word2vec vocabulary
continue
else:
# If we are on the next paragraph, yield the current vector and reset it.
yield(curr_vec)
curr_line = row
curr_vec = np.zeros(self.dimensions)
try:
curr_vec = self.word2vec_model[(self.word_index[col])] * self.tf_idf[row, col]
                except KeyError:  # word missing from the word2vec vocabulary
continue
def train(self):
self.vectors = list(self.learnVectors())
def getMostSimilar(self, sentence, top_n = 10, threshold = 0.5, sentences = None, vectors = None):
'''
Given a new sentence, find the closest top_n elements
Args:
sentence(string) : Text we want to find most similar to.
top_n (int) : Total number of most similar tuples we want returned (Default value is 5).
threshold (float) : Minimum Cosine Distance to be returned
sentences (list[string]) : List of sentences to be compared to
vectors[list[numpy nd array]] : Vector embedding of sentences
Returns:
list[(float, string)]: A list of (cosine similarity, sentence) tuples of size top_n closest to the
input sentence.
'''
inferred_vector = self.inferVector(sentence)
if sentences and vectors:
corpus = sentences
vecs = vectors
else:
corpus = self.sentences
vecs = self.vectors
cos_similarities = np.ravel(cosine_similarity(inferred_vector.reshape(1,-1), vecs))
most_similar = np.argpartition(-cos_similarities, top_n)[:top_n]
return [(cos_similarities[sentence_index], corpus[sentence_index]) for sentence_index in most_similar if cos_similarities[sentence_index] >= threshold]
def inferVector(self, line):
if self.extra_tf_idf_obj:
return self.inferVector2(line)
return self.inferVector1(line)
def inferVector1(self, line):
'''
Given a new line, infer a custom vector representation using the corpus tfidf.
Args:
line : new sentence to be inferred
Returns:
numpy.ndarray : vector representation of the line
'''
line = ' '.join(simple_preprocess(line)) # pre-process the line
line_tf_idf = self.tf_idf_obj.transform([line]) # infer the tf-idf values for the words in the line
rows, cols = line_tf_idf.nonzero()
new_vec = np.zeros(self.dimensions)
# Apply the same sentence to vector conversion as above.
for col in cols:
try:
new_vec += (self.word2vec_model[(self.word_index[col])] * line_tf_idf[0, col])
            except KeyError:  # skip out-of-vocabulary words
continue
        return np.asarray(new_vec)
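    def inferVector2(self, line):
        '''
        Hypothetical reconstruction: the source truncates before this method,
        which inferVector dispatches to when supervised_docs were provided.
        Mirrors inferVector1, but scales by the tf-idf fitted on the
        supervised "ground truth" sentences.
        '''
        line = ' '.join(simple_preprocess(line))
        line_tf_idf = self.extra_tf_idf_obj.transform([line])
        rows, cols = line_tf_idf.nonzero()
        new_vec = np.zeros(self.dimensions)
        for col in cols:
            try:
                new_vec += (self.word2vec_model[(self.extra_word_index[col])] * line_tf_idf[0, col])
            except KeyError:  # skip out-of-vocabulary words
                continue
        return np.asarray(new_vec)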
import os.path
import numpy as np
class MnistNShot:
def __init__(self, root, batchsz, n_way, k_shot, k_query, imgsz):
"""
Different from mnistNShot, the
:param root:
:param batchsz: task num
:param n_way:
:param k_shot:
:param k_qry:
:param imgsz:
"""
self.resize = imgsz
if not os.path.isfile(os.path.join(root, 'mnist.npy')):
# TODO : May add formatting npy file here
raise FileNotFoundError("Must have the npy file first")
else:
# if data.npy exists, just load it.
self.x = np.load(os.path.join(root, 'mnist.npy'))
print('load from mnist.npy.')
# TODO: can not shuffle here, we must keep training and test set distinct!
# mnist 10 classes
        self.x_train, self.x_test = self.x[:10], self.x[0:]  # note: with only 10 MNIST classes, both splits span all classes and overlap
# self.normalization()
# normalization manually
self.x_train /= 255.
self.x_test /= 255.
self.batchsz = batchsz
self.n_cls = self.x.shape[0] # 1623
self.n_way = n_way # n way
self.k_shot = k_shot # k shot
self.k_query = k_query # k query
# must less than number of images per class
# assert (k_shot + k_query) <= 20
# save pointer of current read batch in total cache
self.indexes = {"train": 0, "test": 0}
self.datasets = {"train": self.x_train, "test": self.x_test} # original data cached
print("DB: train", self.x_train.shape, "test", self.x_test.shape)
self.datasets_cache = {"train": self.load_data_cache(self.datasets["train"]), # current epoch data cached
"test": self.load_data_cache(self.datasets["test"])}
def normalization(self):
"""
Normalizes our data, to have a mean of 0 and sdt of 1
"""
# TODO: you can set mean and std here
        self.mean = np.mean(self.x_train)
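        # Assumed continuation (the source is cut off here): normalise both
        # splits with the training-set statistics, as in the usual N-shot loaders.
        self.std = np.std(self.x_train)
        self.x_train = (self.x_train - self.mean) / self.std
        self.x_test = (self.x_test - self.mean) / self.std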
import glob, os, time, sys
import numpy as np
import scipy.linalg as sl, scipy.stats, scipy.special
class OutlierGibbs(object):
"""Gibbs-based pulsar-timing outlier analysis.
Based on:
Article by <NAME>:
"Robust and Accurate Inference via a Mixture
of Gaussian and Student's t Errors",
https://doi.org/10.1080/10618600.2018.1537925
arXiv:1707.03057.
Article by <NAME>:
"Controlling Outlier Contamination In Multimessenger
    Time-domain Searches For Supermassive Binary Black Holes",
(In prep. 2021)
Code from https://github.com/jellis18/gibbs_student_t
Authors:
<NAME>, <NAME>, <NAME>
Example usage:
> gibbs = OutlierGibbs(pta, model='mixture', vary_df=True,
theta_prior='beta', vary_alpha=True)
> params = np.array([p.sample() for p in gibbs.params]).flatten()
> gibbs.sample(params, outdir='./outlier/',
niter=10000, resume=False)
> poutlier = np.mean(gibbs.poutchain, axis = 0)
# Gives marginalized outlier probability of each TOA
"""
def __init__(self, pta, model='mixture',
m=0.01, tdf=4, vary_df=True,
theta_prior='beta',
alpha=1e10, vary_alpha=True,
pspin=None):
"""
Parameters
-----------
pta : object
instance of a pta object for a single pulsar
model : str
type of outlier model
[default = mixture of Gaussian and Student's t]
tdf : int
degrees of freedom for Student's t outlier distribution
[default = 4]
m : float
a-priori proportion of observations that are outliers
[default = 0.01]
vary_df : boolean
vary the Student's t degrees of freedom
[default = True]
theta_prior : str
prior outlier probability
[default = beta distribution]
alpha : float
relative width of outlier to inlier distribution
[default = 1e10]
vary_alpha : boolean
vary the relative outlier to inlier width
[default = True]
pspin : float
pulsar spin period for vvh17 model, arXiv:1609.02144
[default = None]
"""
self.pta = pta
if np.any(['basis_ecorr' in key for
key in self.pta._signal_dict.keys()]):
pass
else:
print('ERROR: Gibbs outlier analysis must use basis_ecorr, not kernel ecorr')
# a-priori proportion of observations that are outliers
self.mp = m
# a-priori outlier probability distribution
self.theta_prior = theta_prior
# spin period
self.pspin = pspin
# vary t-distribution d.o.f
self.vary_df = vary_df
# vary alpha
self.vary_alpha = vary_alpha
# For now assume one pulsar
self._residuals = self.pta.get_residuals()[0]
# which likelihood model
self._lmodel = model
# auxiliary variable stuff
xs = [p.sample() for p in pta.params]
self._b = np.zeros(self.pta.get_basis(xs)[0].shape[1])
# for caching
self.TNT = None
self.d = None
# outlier detection variables
self._pout = np.zeros_like(self._residuals)
self._z = np.zeros_like(self._residuals)
if not vary_alpha:
self._alpha = np.ones_like(self._residuals) * alpha
else:
self._alpha = np.ones_like(self._residuals)
self._theta = self.mp
self.tdf = tdf
if model in ['t', 'mixture', 'vvh17']:
self._z = np.ones_like(self._residuals)
@property
def params(self):
ret = []
for param in self.pta.params:
ret.append(param)
return ret
def map_params(self, xs):
return {par.name: x for par,
x in zip(self.params, xs)}
def get_hyper_param_indices(self):
ind = []
for ct, par in enumerate(self.params):
if 'ecorr' in par.name or 'log10_A' in par.name or 'gamma' in par.name:
ind.append(ct)
return np.array(ind)
def get_white_noise_indices(self):
ind = []
for ct, par in enumerate(self.params):
if 'efac' in par.name or 'equad' in par.name:
ind.append(ct)
return np.array(ind)
def update_hyper_params(self, xs):
# get hyper parameter indices
hind = self.get_hyper_param_indices()
# get initial log-likelihood and log-prior
lnlike0, lnprior0 = self.get_lnlikelihood(xs), self.get_lnprior(xs)
xnew = xs.copy()
for ii in range(10):
# standard gaussian jump (this allows for different step sizes)
q = xnew.copy()
sigmas = 0.05 * len(hind)
probs = [0.1, 0.15, 0.5, 0.15, 0.1]
sizes = [0.1, 0.5, 1.0, 3.0, 10.0]
scale = np.random.choice(sizes, p=probs)
par = np.random.choice(hind, size=1) # only one hyper param at a time
q[par] += np.random.randn(len(q[par])) * sigmas * scale
# get log-like and log prior at new position
lnlike1, lnprior1 = self.get_lnlikelihood(q), self.get_lnprior(q)
# metropolis step
diff = (lnlike1 + lnprior1) - (lnlike0 + lnprior0)
if diff > np.log(np.random.rand()):
xnew = q
lnlike0 = lnlike1
lnprior0 = lnprior1
else:
xnew = xnew
return xnew
def update_white_params(self, xs):
# get white noise parameter indices
wind = self.get_white_noise_indices()
xnew = xs.copy()
lnlike0, lnprior0 = self.get_lnlikelihood_white(xnew), self.get_lnprior(xnew)
for ii in range(20):
# standard gaussian jump (this allows for different step sizes)
q = xnew.copy()
sigmas = 0.05 * len(wind)
probs = [0.1, 0.15, 0.5, 0.15, 0.1]
sizes = [0.1, 0.5, 1.0, 3.0, 10.0]
scale = np.random.choice(sizes, p=probs)
par = np.random.choice(wind, size=1)
q[par] += np.random.randn(len(q[par])) * sigmas * scale
# get log-like and log prior at new position
lnlike1, lnprior1 = self.get_lnlikelihood_white(q), self.get_lnprior(q)
# metropolis step
diff = (lnlike1 + lnprior1) - (lnlike0 + lnprior0)
if diff > np.log(np.random.rand()):
xnew = q
lnlike0 = lnlike1
lnprior0 = lnprior1
else:
xnew = xnew
return xnew
def update_b(self, xs):
# map parameter vector
params = self.map_params(xs)
# start likelihood calculations
loglike = 0
# get auxiliaries
Nvec = self._alpha**self._z * self.pta.get_ndiag(params)[0]
phiinv = self.pta.get_phiinv(params, logdet=False)[0]
residuals = self._residuals
T = self.pta.get_basis(params)[0]
if self.TNT is None and self.d is None:
self.TNT = np.dot(T.T, T / Nvec[:,None])
self.d = np.dot(T.T, residuals/Nvec)
#d = self.pta.get_TNr(params)[0]
#TNT = self.pta.get_TNT(params)[0]
# Red noise piece
Sigma = self.TNT + np.diag(phiinv)
try:
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, self.d)/s)
Li = u * np.sqrt(1/s)
except np.linalg.LinAlgError:
Q, R = sl.qr(Sigma)
Sigi = sl.solve(R, Q.T)
mn = np.dot(Sigi, self.d)
u, s, _ = sl.svd(Sigi)
Li = u * np.sqrt(1/s)
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
return b
def update_theta(self, xs):
if self._lmodel in ['t', 'gaussian']:
return self._theta
elif self._lmodel in ['mixture', 'vvh17']:
n = len(self._residuals)
if self.theta_prior == 'beta':
mk = n * self.mp
k1mm = n * (1-self.mp)
else:
mk, k1mm = 1.0, 1.0
# from Tak, <NAME> (2018): k = sample size, m = 0.01
ret = scipy.stats.beta.rvs(np.sum(self._z) + mk,
n - np.sum(self._z) + k1mm)
return ret
def update_z(self, xs):
# map parameters
params = self.map_params(xs)
if self._lmodel in ['t', 'gaussian']:
return self._z
elif self._lmodel in ['mixture', 'vvh17']:
Nvec0 = self.pta.get_ndiag(params)[0]
Tmat = self.pta.get_basis(params)[0]
Nvec = self._alpha * Nvec0
theta_mean = np.dot(Tmat, self._b)
top = self._theta * scipy.stats.norm.pdf(self._residuals,
loc=theta_mean,
scale=np.sqrt(Nvec))
if self._lmodel == 'vvh17':
top = self._theta / self.pspin
bot = top + (1-self._theta) * scipy.stats.norm.pdf(self._residuals,
loc=theta_mean,
scale=np.sqrt(Nvec0))
q = top / bot
q[np.isnan(q)] = 1
self._pout = q
return scipy.stats.binom.rvs(1, list(map(lambda x: min(x, 1), q)))
def update_alpha(self, xs):
# map parameters
params = self.map_params(xs)
# equation 12 of Tak, <NAME>
if np.sum(self._z) >= 1 and self.vary_alpha:
Nvec0 = self.pta.get_ndiag(params)[0]
Tmat = self.pta.get_basis(params)[0]
theta_mean = np.dot(Tmat, self._b)
top = ((self._residuals - theta_mean)**2 *
self._z / Nvec0 + self.tdf) / 2
bot = scipy.stats.gamma.rvs((self._z +
self.tdf) / 2)
return top / bot
else:
return self._alpha
def update_df(self, xs):
if self.vary_df:
# 1. evaluate the log conditional posterior of df for 1, 2, ..., 30.
log_den_df = np.array(list(map(self.get_lnlikelihood_df,
np.arange(1,31))))
# 2. normalize the probabilities
den_df = np.exp(log_den_df - log_den_df.max())
den_df /= den_df.sum()
# 3. sample one of values (1, 2, ..., 30) according to the probabilities
df = np.random.choice(np.arange(1, 31), p=den_df)
return df
else:
return self.tdf
def get_lnlikelihood_white(self, xs):
# map parameters
params = self.map_params(xs)
matrix = self.pta.get_ndiag(params)[0]
# Nvec and Tmat
Nvec = self._alpha**self._z * matrix
Tmat = self.pta.get_basis(params)[0]
# whitened residuals
mn = np.dot(Tmat, self._b)
yred = self._residuals - mn
# log determinant of N
logdet_N = np.sum(np.log(Nvec))
# triple product in likelihood function
rNr = np.sum(yred**2/Nvec)
# first component of likelihood function
loglike = -0.5 * (logdet_N + rNr)
return loglike
def get_lnlikelihood(self, xs):
# map parameter vector
params = self.map_params(xs)
# start likelihood calculations
loglike = 0
# get auxiliaries
Nvec = self._alpha**self._z * self.pta.get_ndiag(params)[0]
phiinv, logdet_phi = self.pta.get_phiinv(params,
logdet=True)[0]
residuals = self._residuals
T = self.pta.get_basis(params)[0]
if self.TNT is None and self.d is None:
self.TNT = np.dot(T.T, T / Nvec[:,None])
self.d = np.dot(T.T, residuals/Nvec)
# log determinant of N
logdet_N = np.sum(np.log(Nvec))
# triple product in likelihood function
rNr = np.sum(residuals**2/Nvec)
# first component of likelihood function
loglike += -0.5 * (logdet_N + rNr)
# Red noise piece
Sigma = self.TNT + np.diag(phiinv)
try:
cf = sl.cho_factor(Sigma)
expval = sl.cho_solve(cf, self.d)
except np.linalg.LinAlgError:
return -np.inf
logdet_sigma = np.sum(2 * np.log(np.diag(cf[0])))
loglike += 0.5 * (np.dot(self.d, expval) -
logdet_sigma - logdet_phi)
return loglike
def get_lnlikelihood_df(self, df):
n = len(self._residuals)
ll = -(df/2) * np.sum(np.log(self._alpha)+1/self._alpha) + \
n * (df/2) * np.log(df/2) - n*scipy.special.gammaln(df/2)
return ll
def get_lnprior(self, xs):
return sum(p.get_logpdf(x) for p, x
in zip(self.params, xs))
def sample(self, xs, outdir='./', niter=10000, resume=False):
print(f'Creating chain directory: {outdir}')
os.system(f'mkdir -p {outdir}')
self.chain = np.zeros((niter, len(xs)))
self.bchain = np.zeros((niter, len(self._b)))
self.thetachain = np.zeros(niter)
self.zchain = np.zeros((niter, len(self._residuals)))
self.alphachain = np.zeros((niter, len(self._residuals)))
self.poutchain = np.zeros((niter, len(self._residuals)))
self.dfchain = np.zeros(niter)
self.iter = 0
startLength = 0
xnew = xs
if resume:
print('Resuming from previous run...')
# read in previous chains
tmp_chains = []
tmp_chains.append(np.loadtxt(f'{outdir}/chain.txt'))
tmp_chains.append(np.loadtxt(f'{outdir}/bchain.txt'))
tmp_chains.append(np.loadtxt(f'{outdir}/thetachain.txt'))
tmp_chains.append(np.loadtxt(f'{outdir}/zchain.txt'))
tmp_chains.append(np.loadtxt(f'{outdir}/alphachain.txt'))
tmp_chains.append(np.loadtxt(f'{outdir}/poutchain.txt'))
tmp_chains.append(np.loadtxt(f'{outdir}/dfchain.txt'))
# -*- coding: utf-8 -*-
"""Clark_Whitehead_HW5_Neural_Network_final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1c1Doh3Fy7Hv0paTgVngGdJeH6vg0vJX1
# Neural Network
In this assignment you will build an artificial neural network _from scratch_, meaning without modern machine learning packages (e.g. scikit-learn, tensorflow, and pytorch). You should use numpy, and can use other standard python libraries if you like.
## Part 1: Neural Network with Stochastic Gradient Descent (4 points)
Define a class NeuralNetwork that implements an artificial neural network with a single hidden layer. The hidden layer should have a non-linear activation function (e.g. Rectified Linear Units, ReLU), and the output should have a Softmax activation function. Use the template provided.
The hard part of this is the **train** method, which requires computing lots of gradients. See the [notes](https://laulima.hawaii.edu/access/content/group/MAN.90549.202130/Notes_6__Introduction_to_Neural_Networks.pdf) on Laulima to see the equations for calculating these analytically. Translating these equations into code is non-trivial. The **backpropagation** algorithm is a dynamic programming algorithm that computes the gradients layer by layer, and can be written very elegantly in terms of matrix manipulations (a couple lines of numpy code).
_Reminder: Do NOT copy and paste code from the internet. Write your own code._
## Part 2: Apply Your Model to Fashion Dataset (3 points)
We will test the model on the Fashion MNIST dataset. This is a 10-class classification task designed to be similar to the MNIST digit recognition dataset. The classes are different items of clothing (shoes, shirts, handbags, etc.) instead of digits. Here is an [introduction](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) and [github page](https://github.com/zalandoresearch/fashion-mnist).
1. Demonstrate overfitting in your network by plotting the training and test set losses vs. epoch. An *epoch* is one iteration through the training set; in SGD this means one weight update for each training example. You can use a smaller dataset so that you overfit faster, but clearly state how many examples are in your train and test sets.
2. Optimize the hyperparameters (learning rate and number of hidden neurons) of the neural network. Because the test dataset is fairly large (10k examples), you don't need to split off a separate validation set for this analysis. Report the best performance (test accuracy) and the best hyperparameters.
3. Visualize the 10 test examples with the largest loss.
## Part 3: Better and Faster: Mini-Batch SGD (3 points)
Implement mini-batch gradient descent in your NeuralNetwork train method. It is much more efficient to update the weights on *batches* of training data, e.g. 100 examples at a time, which serves two purposes: (1) each update is a better, less-noisy estimate of the true gradient, and (2) the matrix multiplications can be parallelized for an almost-linear speedup with multiple cores or a GPU (by default, numpy should automatically use multiple CPUs for matrix multiplications). This requires implementing the forward and backpropagation computations efficiently, using matrix multiplications rather than for loops.
"""
import numpy as np
from sklearn.metrics import accuracy_score
# Download Fashion MNIST Dataset
import gzip
import os
from urllib.request import urlretrieve
import numpy as np
import matplotlib.pyplot as plt
def fashion_mnist():
"""
Download compressed Fashion MNIST data to local directory, and
unpack data into numpy arrays.
Return (train_images, train_labels, test_images, test_labels).
Args:
None
Returns:
Tuple of (train_images, train_labels, test_images, test_labels), each
a matrix. Rows are examples. Columns of images are pixel values.
Columns of labels are a onehot encoding of the correct class.
"""
url = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
files = ['train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz']
path = './' # Download data to current directory.
os.makedirs(path, exist_ok=True) # Create path if it doesn't exist.
# Download any missing files
for file in files:
if file not in os.listdir(path):
urlretrieve(url + file, os.path.join(path, file))
print("Downloaded %s to %s" % (file, path))
def _images(path):
"""Return images loaded locally."""
with gzip.open(path) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), 'B', offset=16)
return pixels.reshape(-1, 784).astype('float32') / 255
def _labels(path):
"""Return labels loaded locally."""
with gzip.open(path) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), 'B', offset=8)
def _onehot(integer_labels):
"""Return matrix whose rows are onehot encodings of integers."""
n_rows = len(integer_labels)
n_cols = integer_labels.max() + 1
onehot = np.zeros((n_rows, n_cols), dtype='uint8')
onehot[np.arange(n_rows), integer_labels] = 1
return onehot
return _onehot(integer_labels)
train_images = _images(os.path.join(path, files[0]))
train_labels = _labels(os.path.join(path, files[1]))
test_images = _images(os.path.join(path, files[2]))
test_labels = _labels(os.path.join(path, files[3]))
return train_images, train_labels, test_images, test_labels
train_images, train_labels, test_images, test_labels = fashion_mnist()
# Plot examples from dataset.
plt.figure(1, figsize=(14,6))
for i in range(10):
plt.subplot(1,10, i+1)
plt.imshow(train_images[i,:].reshape(28,28), cmap='bone')
plt.title(f'Label: {train_labels[i].argmax()}')
plt.xticks([])
plt.yticks([])
print(test_images.shape)
train_images_1000 = train_images[0:1000, :]
train_labels_1000 = train_labels[0:1000, :]
train_images_100 = train_images[0:100, :]
train_labels_100 = train_labels[0:100, :]
test_images_1000 = test_images[0:1000, :]
test_labels_1000 = test_labels[0:1000, :]
test_images_100 = test_images[0:100, :]
test_labels_100 = test_labels[0:100, :]
train_images_10 = train_images[0:10, :]
train_labels_10 = train_labels[0:10, :]
train_images_1 = train_images[0:1, :]
train_labels_1 = train_labels[0:1, :]
print(train_images_10.shape)
#x_train shape = 10x4
x_train = np.array([[1.2, 4.3, 2.1, 1.9],
[6.2, 8.3, 5.1, 9.9],
[2.3, 4.3, 3.1, 0.9],
[4.1, 4.4, 1.1, 0.3],
[6.1, 7.1, 8.1, 9.1],
[1.0, 2.0, 1.0, 1.0],
[5.1, 5.1, 5.1, 5.1],
[1.8, 4.0, 3.9, 2.7],
[4.4, 0.8, 1.9, 2.7],
[6.9, 8.8, 5.7, 7.1]])
#y_train shape = 10x2
y_train = np.array([[1,0],
[0,1],
[1,0],
[1,0],
[0,1],
[1,0],
[0,1],
[1,0],
[1,0],
[0,1]])
# Part 1: Defining the neural network.
class NeuralNetwork():
def __init__(self, inputs, hidden, outputs):
"""
Initialize a simple neural network with a single hidden layer.
This method randomly initializes the parameters of the model,
saving them as private variables.
Each layer is parameterized by a weight matrix and a bias vector;
a useful trick is store the weights and biases for a layer together,
as a single matrix.
Args:
inputs: int, input dimension
hidden: int, number of hidden neurons
outputs: int, number of output neurons
Returns:
None
"""
# Initialize the weights and biases of the neural network as private variables.
# Store a weight matrix for each layer.
self.hidden = hidden
self.train_acc = []
self.test_acc = []
self.train_loss = []
self.test_loss = []
self.w1 = 2 * np.random.rand(inputs, hidden) - 1
self.w2 = 2 * np.random.rand(hidden, outputs) - 1
def loss(self, y_true, y_pred):
"""
Compute categorical cross-entropy loss function.
Sum loss contributions over the outputs (axis=1), but
average over the examples (axis=0)
Args:
y_true: NxD numpy array with N examples and D outputs (one-hot labels).
y_pred: NxD numpy array with N examples and D outputs (probabilities).
Returns:
loss: scalar, mean cross-entropy loss over the batch.
"""
# WRITE ME
loss = np.sum(y_pred * y_true, axis=1) #multiply the one-hot y_true by its predicted value
loss = -np.log(loss) #take neg log of the predicted value
loss = np.mean(loss) #take mean of loss from whole batch
return loss
def softmax(self, input):
normalization = input - np.max(input, axis=1, keepdims=True) #subtract max value from all
#values in order to keep exp from exploding
exp = np.exp(normalization) #exponentiate the max-shifted logits so exp cannot overflow
output = exp / np.sum(exp, axis=1, keepdims=True) #divide all exp by sum so their combined total = 1
output = np.clip(output, 1e-7, 1 - 1e-7) #clip in case there is a 0 or a 1. Necessary for loss so we don't take log of 0
# output = np.sum(output * targets, axis=1, keepdims=1)
return output
def relu(self, input):
output = np.maximum(0, input)
return output
def predict(self, X):
"""
Make predictions on inputs X.
Args:
X: NxM numpy array where n-th row is an input.
Returns:
y_pred: NxD array where n-th row is vector of probabilities.
"""
# WRITE ME
self.h = np.dot(X, self.w1)
self.h = self.relu(self.h)
output = np.dot(self.h, self.w2)
y_pred = self.softmax(output)
return y_pred
def evaluate(self, X, y):
"""
Make predictions and compute loss.
Args:
X: NxM numpy array where n-th row is an input.
y: NxD numpy array with N examples and D outputs (one-hot labels).
Returns:
loss: scalar, mean loss over the examples.
"""
# WRITE ME
y_pred = self.predict(X)
y_true = y
loss = self.loss(y_true, y_pred)
return loss
def train(self, X, y, X_test, y_test, max_epochs, lr=0.0001):
"""
Train the neural network using stochastic gradient descent.
Args:
X: NxM numpy array where n-th row is an input.
y: NxD numpy array with N examples and D outputs (one-hot labels).
lr: scalar learning rate. Use small value for debugging.
max_epochs: int, each epoch is one iteration through the training data.
Returns:
None
"""
# WRITE ME
for i in range(max_epochs):
y_pred = self.predict(X)
self.train_loss.append(self.loss(y, y_pred))
acc_pred = np.where(y_pred > 0.5, 1, 0)
self.train_acc.append(accuracy_score(acc_pred, y))
#derivatives
dl_dz = y_pred - y
dl_dw2 = np.dot(self.h.T, dl_dz)
dh_dzh = self.h > 0
dl_dzh = np.dot(dl_dz, self.w2.T)
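# The remaining backprop steps and the weight update are truncated above; the
# continuation below (still inside the epoch loop) follows directly from the
# quantities already computed. A reconstruction, not the original notebook code.
dl_dw1 = np.dot(X.T, dl_dzh * dh_dzh)  # gradient w.r.t. first-layer weights, with the ReLU mask applied
self.w2 -= lr * dl_dw2                 # plain gradient-descent updates
self.w1 -= lr * dl_dw1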
# -*- coding: utf-8 -*-
# run in py3 !!
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
import tensorflow as tf
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
config.gpu_options.allow_growth = True
tf.Session(config=config)
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
from keras import backend as K
import keras.layers.convolutional as conv
from keras.layers import merge
from keras.wrappers.scikit_learn import KerasRegressor
from keras import utils
from keras.layers.pooling import MaxPooling1D, MaxPooling2D
from keras.layers import pooling
from keras.models import Sequential, Model
from keras.regularizers import l1, l2
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras.callbacks import EarlyStopping
from keras import callbacks
from keras import backend as K
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.models import Model
from keras import initializers, layers
from keras.optimizers import SGD, Adadelta, Adam
from keras.regularizers import l1, l2
from keras import regularizers
import sys
sys.path.append('.')
from hist_figure import his_figures
if len(sys.argv) > 1:
prefix = sys.argv[1]
else:
prefix = time.time()
DATAPATH = '5fold/'
RESULT_PATH = './results/'
feature_num = 25
batch_num = 2
# batch_size = 32
batch_size = 512
SEQ_LENGTH = 20
STATEFUL = False
scaler = None # tmp, for fit_transform
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
def get_data(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print(path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['error'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['super', 'com_date', 'error', 'numbers']].copy()
# print(df_empty)
df_super = df_empty.ix[:, [0]]
df_com_date = df_empty.ix[:, [1]]
df_error = df_empty.ix[:, [2]]
df_numbers = df_empty.ix[:, [3]]
X_train_ = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train_ = df_error.as_matrix()
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_super = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [1]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [3]])
df_numbers = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [2]])
df_error = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = pd.DataFrame(df_month)
X_train = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
import caffe
from skimage import color, io
import cv2
import numpy as np
import glog as log
import os
import tifffile
import time
def load_network(prototxt='models/test.prototxt',
caffemodel='models/trn_iter_15000.caffemodel',
gpu_num=0):
caffe.set_mode_gpu()
caffe.set_device(gpu_num)
log.info('DCNN Weights: %s' % caffemodel)
log.info('DCNN Model Definition: %s' % prototxt)
return caffe.Net(prototxt, caffemodel, caffe.TEST)
# fac = 2 ^ (number of times image is downsampled in the network)
def prepare_input(img_in, fac=8, extra_pad=0, pad_mode='reflect', read_img=True):
if read_img:
log.info('Reading %s' % img_in)
img_in = cv2.imread(img_in)
if img_in.ndim == 2:
# force color conversion
img_in = np.repeat(img_in[:,:,np.newaxis],3,axis=2)
# Pad H,W to make them multiples of fac
H=img_in.shape[0]
W=img_in.shape[1]
padH = (fac - (H % fac)) % fac
padW = (fac - (W % fac)) % fac
if padH>0 or padW>0 or extra_pad>0:
img_in = np.pad(img_in,pad_width=((extra_pad,padH+extra_pad),(extra_pad,padW+extra_pad),(0,0)),mode=pad_mode)
img_in=img_in.astype(np.float32)
return (img_in, H, W)
# Forward Pass
def forward_once(net, img_in, H, W):
t=time.time()
# H*W*C to C*H*W
img_in = np.transpose(img_in, [2, 0, 1])
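# The forward pass is truncated above; a sketch of the usual Caffe idiom for
# finishing it. The input blob name 'data' is an assumption -- it must match
# the blob declared in the prototxt.
img_in = img_in[np.newaxis, ...]            # add batch axis -> 1*C*H*W
net.blobs['data'].reshape(*img_in.shape)    # resize the input blob to this image
net.blobs['data'].data[...] = img_in
out = net.forward()                         # dict of output blobs
log.info('Forward pass took %.3f s' % (time.time() - t))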
from torch.utils.data import DataLoader, Dataset, Sampler
from pathlib import Path
from collections import defaultdict
import json
import random
from multiprocessing import Pool
import h5py
import pickle
import math
from tqdm import tqdm
import torch
import numpy as np
import csv
from copy import deepcopy
from torch.utils.data.distributed import DistributedSampler
from transformers import T5TokenizerFast, BartTokenizer
from tokenization import VLT5TokenizerFast, VLT5Tokenizer
project_dir = Path(__file__).resolve().parent.parent # VLT5
workspace_dir = project_dir.parent
dataset_dir = workspace_dir.joinpath('datasets/').resolve()
vcr_dir = dataset_dir.joinpath('VCR')
vcr_img_dir = vcr_dir.joinpath('vcr1images')
vcr_feature_dir = vcr_dir.joinpath('features')
class VCRFineTuneDataset(Dataset):
def __init__(self, split='train', raw_dataset=None, rank=-1, topk=-1, verbose=True, args=None, mode='train'):
super().__init__()
self.topk = topk
self.verbose = verbose
self.args = args
self.mode = mode
# Loading datasets to data
self.split = split
self.sources = split.split(',')
if self.verbose:
print('Data sources: ', self.sources)
if 't5' in self.args.backbone:
if self.args.use_vision:
self.tokenizer = VLT5TokenizerFast.from_pretrained(
args.backbone,
max_length=self.args.max_text_length,
do_lower_case=self.args.do_lower_case)
else:
self.tokenizer = T5TokenizerFast.from_pretrained(
args.backbone,
max_length=self.args.max_text_length,
do_lower_case=self.args.do_lower_case)
elif 'bart' in self.args.backbone:
self.tokenizer = BartTokenizer.from_pretrained(
args.backbone,
# max_length=self.args.max_text_length,
do_lower_case=self.args.do_lower_case)
additional_special_tokens = [f'<extra_id_{i}>' for i in range(100-1, -1, -1)] + \
[f'<vis_extra_id_{i}>' for i in range(100-1, -1, -1)]
special_tokens_dict = {'additional_special_tokens': additional_special_tokens}
num_added_toks = self.tokenizer.add_special_tokens(special_tokens_dict)
self.img_ids_to_source = {}
data_info_dicts = []
for source in self.sources:
data_info_path = dataset_dir.joinpath(f'VCR/{source}.jsonl')
with open(data_info_path) as f:
_data_info_dicts = [json.loads(s) for s in f]
for _d in _data_info_dicts:
self.img_ids_to_source[_d['img_id']] = source
_d['source'] = source
data_info_dicts.extend(_data_info_dicts)
if self.verbose:
print(f"Loaded {len(_data_info_dicts)} data from", source)
data = data_info_dicts
self.rank = rank
if self.topk > 0:
data = data[:self.topk]
if self.verbose:
print(f"Use only {self.topk} data")
self.data = data
if self.verbose:
print("# all sentences:", len(self.data))
self.n_boxes = args.n_boxes
self.source_to_h5 = {
'train': vcr_feature_dir.joinpath(f'train_boxes36.h5'),
'val': vcr_feature_dir.joinpath(f'val_boxes36.h5'),
'test': vcr_feature_dir.joinpath(f'test_boxes36.h5'),
'train_GT': vcr_feature_dir.joinpath(f'train_boxes_GT.h5'),
'val_GT': vcr_feature_dir.joinpath(f'val_boxes_GT.h5'),
'test_GT': vcr_feature_dir.joinpath(f'test_boxes_GT.h5'),
}
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
out_dict = {}
out_dict['args'] = self.args
datum = self.data[idx]
uid = f"{datum['img_id']}_{datum['question_number']}"
out_dict['uid'] = uid
test = 'test' in datum['annot_id']
out_dict['is_test'] = test
###### Image ######
assert self.args.use_vision
img_id = datum['img_id']
out_dict['img_id'] = img_id
img_path = vcr_img_dir.joinpath(datum['img_fn'])
# assert img_path.exists()
out_dict['img_path'] = img_path
source = self.img_ids_to_source[img_id]
f = self.source_to_h5[source]
f_GT = self.source_to_h5[f'{source}_GT']
if isinstance(f, Path):
f = h5py.File(f, 'r')
self.source_to_h5[source] = f
if isinstance(f_GT, Path):
f_GT = h5py.File(f_GT, 'r')
self.source_to_h5[f'{source}_GT'] = f_GT
img_h = f[f'{img_id}/img_h'][()]
img_w = f[f'{img_id}/img_w'][()]
gt_boxes = f_GT[f'{img_id}/boxes'][()] # (x1, y1, x2, y2)
n_gt_boxes = min(len(gt_boxes), 36)
gt_boxes = gt_boxes[:n_gt_boxes]
n_pred_boxes = 36 - n_gt_boxes
pred_boxes = f[f'{img_id}/boxes'][:n_pred_boxes]
boxes = np.concatenate([gt_boxes, pred_boxes], axis=0)
# Normalize the boxes (to 0 ~ 1)
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
import numpy as np
import pandas as pd
import pytest
from scipy import stats
from locan import LocData
from locan.analysis import BlinkStatistics
from locan.analysis.blinking import _blink_statistics, _DistributionFits
def test__blink_statistics_0():
# frame with on and off periods up to three frames and starting with one-frame on-period.
frames = np.array([0, 4, 6, 7, 8, 12, 13])
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 1, 3, 2])
assert np.array_equal(results["off_periods"], [3, 1, 3])
assert np.array_equal(results["on_periods_frame"], [0, 4, 6, 12])
assert np.array_equal(results["off_periods_frame"], [1, 5, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0], [1], [2, 3, 4], [5, 6]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 5, 2])
assert np.array_equal(results["off_periods"], [3, 3])
assert np.array_equal(results["on_periods_frame"], [0, 4, 12])
assert np.array_equal(results["off_periods_frame"], [1, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0], [1, 2, 3, 4], [5, 6]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [14])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6]])
]
)
def test__blink_statistics_1():
# frame with on and off periods up to three frames and starting with two-frame on-period.
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13])
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [1, 2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 3, 6, 12])
assert np.array_equal(results["off_periods_frame"], [2, 4, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [4, 3, 2])
assert np.array_equal(results["off_periods"], [2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 6, 12])
assert np.array_equal(results["off_periods_frame"], [4, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [14])
""" Experiment runner for the model with knowledge graph attached to interaction data """
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import time
import tensorflow as tf
import numpy as np
import scipy.sparse as sp
import sys
import json
from preprocessing import create_trainvaltest_split, \
sparse_to_tuple, preprocess_user_item_features, globally_normalize_bipartite_adjacency, \
load_data_monti, load_official_trainvaltest_split, normalize_features
from model import RecommenderGAE, RecommenderSideInfoGAE
from utils import construct_feed_dict
def run(user_features, movie_features, learning_rate=0.01, epochs=500, hidden=[500, 75], feat_hidden=64, accumulation='sum', dropout=0.7,
num_basis_functions=2, features=False, symmetric=True, testing=True):
"""accumulation can be sum or stack"""
# Set random seed
# seed = 123 # use only for unit testing
seed = int(time.time())
np.random.seed(seed)
tf.set_random_seed(seed)
tf.reset_default_graph()
# Settings
# ap = argparse.ArgumentParser()
# # ap.add_argument("-d", "--dataset", type=str, default="ml_100k",
# # choices=['ml_100k', 'ml_1m', 'ml_10m', 'douban', 'yahoo_music', 'flixster'],
# # help="Dataset string.")
# ap.add_argument("-lr", "--learning_rate", type=float, default=0.01,
# help="Learning rate")
# ap.add_argument("-e", "--epochs", type=int, default=2500,
# help="Number training epochs")
# ap.add_argument("-hi", "--hidden", type=int, nargs=2, default=[500, 75],
# help="Number hidden units in 1st and 2nd layer")
# ap.add_argument("-fhi", "--feat_hidden", type=int, default=64,
# help="Number hidden units in the dense layer for features")
# ap.add_argument("-ac", "--accumulation", type=str, default="sum", choices=['sum', 'stack'],
# help="Accumulation function: sum or stack.")
# ap.add_argument("-do", "--dropout", type=float, default=0.7,
# help="Dropout fraction")
# ap.add_argument("-nb", "--num_basis_functions", type=int, default=2,
# help="Number of basis functions for Mixture Model GCN.")
# ap.add_argument("-ds", "--data_seed", type=int, default=1234,
# help="""Seed used to shuffle data in data_utils, taken from cf-nade (1234, 2341, 3412, 4123, 1324).
# Only used for ml_1m and ml_10m datasets. """)
# ap.add_argument("-sdir", "--summaries_dir", type=str, default='logs/' + str(datetime.datetime.now()).replace(' ', '_'),
# help="Directory for saving tensorflow summaries.")
# # Boolean flags
# fp = ap.add_mutually_exclusive_group(required=False)
# fp.add_argument('-nsym', '--norm_symmetric', dest='norm_symmetric',
# help="Option to turn on symmetric global normalization", action='store_true')
# fp.add_argument('-nleft', '--norm_left', dest='norm_symmetric',
# help="Option to turn on left global normalization", action='store_false')
# ap.set_defaults(norm_symmetric=True)
# fp = ap.add_mutually_exclusive_group(required=False)
# fp.add_argument('-f', '--features', dest='features',
# help="Whether to use features (1) or not (0)", action='store_true')
# fp.add_argument('-no_f', '--no_features', dest='features',
# help="Whether to use features (1) or not (0)", action='store_false')
# ap.set_defaults(features=False)
# fp = ap.add_mutually_exclusive_group(required=False)
# fp.add_argument('-ws', '--write_summary', dest='write_summary',
# help="Option to turn on summary writing", action='store_true')
# fp.add_argument('-no_ws', '--no_write_summary', dest='write_summary',
# help="Option to turn off summary writing", action='store_false')
# ap.set_defaults(write_summary=False)
# fp = ap.add_mutually_exclusive_group(required=False)
# fp.add_argument('-t', '--testing', dest='testing',
# help="Option to turn on test set evaluation", action='store_true')
# fp.add_argument('-v', '--validation', dest='testing',
# help="Option to only use validation set evaluation", action='store_false')
# ap.set_defaults(testing=False)
# args = vars(ap.parse_args())
# print('Settings:')
# print(args, '\n')
# Define parameters
DATASET = 'ml_100k'
DATASEED = 1234
NB_EPOCH = epochs
DO = dropout
HIDDEN = hidden
FEATHIDDEN = feat_hidden
BASES = num_basis_functions
LR = learning_rate
WRITESUMMARY = False
SUMMARIESDIR = 'logs/' + str(datetime.datetime.now()).replace(' ', '_')
FEATURES = features
SYM = symmetric
TESTING = testing
ACCUM = accumulation
SELFCONNECTIONS = False
SPLITFROMFILE = True
VERBOSE = True
NUMCLASSES = 5
# Splitting dataset in training, validation and test set
print("Using official MovieLens dataset split u1.base/u1.test with 20% validation set size...")
u_features = user_features
v_features = movie_features
_, _, adj_train, train_labels, train_u_indices, train_v_indices, \
val_labels, val_u_indices, val_v_indices, test_labels, \
test_u_indices, test_v_indices, class_values = load_official_trainvaltest_split('ml_100k', TESTING)
num_users, num_items = adj_train.shape
num_side_features = 0
# feature loading
if not FEATURES:
u_features = sp.identity(num_users, format='csr')
v_features = sp.identity(num_items, format='csr')
u_features, v_features = preprocess_user_item_features(u_features, v_features)
elif FEATURES and u_features is not None and v_features is not None:
# use features as side information and node_id's as node input features
print("Normalizing feature vectors...")
u_features_side = normalize_features(u_features)
v_features_side = normalize_features(v_features)
u_features_side, v_features_side = preprocess_user_item_features(u_features_side, v_features_side)
u_features_side = np.array(u_features_side.todense(), dtype=np.float32)
v_features_side = np.array(v_features_side.todense(), dtype=np.float32)
num_side_features = u_features_side.shape[1]
# node id's for node input features
id_csr_v = sp.identity(num_items, format='csr')
id_csr_u = sp.identity(num_users, format='csr')
u_features, v_features = preprocess_user_item_features(id_csr_u, id_csr_v)
else:
raise ValueError('Features flag is set to true but no features are loaded from dataset ' + DATASET)
# global normalization
support = []
support_t = []
adj_train_int = sp.csr_matrix(adj_train, dtype=np.int32)
for i in range(NUMCLASSES):
# build individual binary rating matrices (supports) for each rating
support_unnormalized = sp.csr_matrix(adj_train_int == i + 1, dtype=np.float32)
if support_unnormalized.nnz == 0 and DATASET != 'yahoo_music':
# yahoo music has dataset split with not all ratings types present in training set.
# this produces empty adjacency matrices for these ratings.
sys.exit('ERROR: normalized bipartite adjacency matrix has only zero entries!!!!!')
support_unnormalized_transpose = support_unnormalized.T
support.append(support_unnormalized)
support_t.append(support_unnormalized_transpose)
support = globally_normalize_bipartite_adjacency(support, symmetric=SYM)
support_t = globally_normalize_bipartite_adjacency(support_t, symmetric=SYM)
if SELFCONNECTIONS:
support.append(sp.identity(u_features.shape[0], format='csr'))
support_t.append(sp.identity(v_features.shape[0], format='csr'))
num_support = len(support)
support = sp.hstack(support, format='csr')
support_t = sp.hstack(support_t, format='csr')
if ACCUM == 'stack':
div = HIDDEN[0] // num_support
if HIDDEN[0] % num_support != 0:
print("""\nWARNING: HIDDEN[0] (=%d) of stack layer is adjusted to %d such that
it can be evenly split in %d splits.\n""" % (HIDDEN[0], num_support * div, num_support))
HIDDEN[0] = num_support * div
# Collect all user and item nodes for test set
test_u = list(set(test_u_indices))
test_v = list(set(test_v_indices))
test_u_dict = {n: i for i, n in enumerate(test_u)}
test_v_dict = {n: i for i, n in enumerate(test_v)}
test_u_indices = np.array([test_u_dict[o] for o in test_u_indices])
test_v_indices = np.array([test_v_dict[o] for o in test_v_indices])
import os
import sys
import imp
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# Test for Torch
def torch(test_models, model_path, img_path):
results_o, results_d, op_sets = dict(), dict(), dict()
from PIL import Image
import torch
import torchvision.models as models
from torchvision import transforms
from torch.autograd import Variable
# Torch to IR
from ox.pytorch.pytorch_parser import PytorchParser
for model in test_models:
if 'inception' in model: image_size = 299
else: image_size = 224
image = Image.open(img_path)
transformation = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image_tensor = transformation(image).float()
image_tensor = image_tensor.unsqueeze_(0)
x = Variable(image_tensor)
inputshape = [3, image_size, image_size]
arch_filename = os.path.join(model_path, 'PyTorch', model+'.pth')
# test model
if 'resnet50' in model:
model_eval = models.resnet50()
elif 'inception' in model:
from models.torch import inception
model_eval = inception.inceptionresnetv2(pretrained=False)
elif 'shufflenet' in model:
from models.torch import shufflenet
model_eval = shufflenet.shufflenet()
elif 'fcn' in model:
from models.torch import fcn
model_eval = fcn.FCNs()
elif 'lstm' in model:
from models.torch import lstm
model_eval = lstm.Lstm()
model_eval.eval()
predict = model_eval(x).data.numpy()
preds = np.squeeze(predict)
print('\033[1;31;40m')
print(' Result of', model, ': ', np.argmax(preds))
print('\033[0m')
results_o[model] = preds
torch.save(model_eval, arch_filename)
# convert
IR_filename = os.path.join(model_path, 'IR', model+'_torch')
parser = PytorchParser(arch_filename, inputshape)
ops = parser.run(IR_filename)
op_sets[model] = ops
del parser
del PytorchParser
# IR to Torch
from ox.pytorch.pytorch_emitter import PytorchEmitter
for model in test_models:
if 'inception' in model: image_size = 299
else: image_size = 224
image = Image.open(img_path)
transformation = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image_tensor = transformation(image).float()
image_tensor = image_tensor.unsqueeze_(0)
x = Variable(image_tensor)
inputshape = [3, image_size, image_size]
arch_filename = os.path.join(model_path, 'IR', model+'_torch.pb')
weight_filename = os.path.join(model_path, 'IR', model+'_torch.npy')
converted_file = os.path.join(model_path, 'PyTorch', model+'_ox')
emitter = PytorchEmitter((arch_filename, weight_filename))
emitter.run(converted_file + '.py', converted_file + '.npy', 'test')
model_converted = imp.load_source('PytorchModel', converted_file + '.py').KitModel(converted_file + '.npy')
model_converted.eval()
predict = model_converted(x).data.numpy()
preds = np.squeeze(predict)
print('\033[1;31;40m')
print(' Result of ', model+'_ox : ', np.argmax(preds))
print('\033[0m')
results_d[model] = np.mean(np.power(results_o[model] - preds, 2))
del emitter
del PytorchEmitter
return results_d, op_sets
# Test for Tensorflow
def tensorflow(test_models, model_path, img_path):
results_o, results_d, op_sets = dict(), dict(), dict()
import tensorflow as tf
from PIL import Image
image = Image.open(img_path)
# Tensorflow to IR
from ox.tensorflow.tensorflow_parser import TensorflowParser
for model in test_models:
arch_filename = os.path.join(model_path, 'tensorflow', model, model+'.ckpt.meta')
weight_filename = os.path.join(model_path, 'tensorflow', model, model+'.ckpt')
# test model
if 'resnet50' in model:
img = np.array(image.resize((299, 299), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import resnet50
preds = resnet50.test(x, model_path)
elif 'inception' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import inception_v3
preds = inception_v3.test(x, model_path)
elif 'shufflenet' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import shufflenet
preds = shufflenet.test(x, model_path)
elif 'fcn' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import fcn
preds = fcn.test(x, model_path)
elif 'lstm' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import lstm
preds = lstm.test(x, model_path)
print('\033[1;31;40m')
print(' Result of', model, ': ', np.argmax(preds))
print('\033[0m')
preds = np.squeeze(preds)
if 'fcn' in model: preds = np.array(preds).astype(np.int32)
results_o[model] = preds
import tensorflow.contrib.keras as keras
keras.backend.clear_session()
# parser
IR_filename = os.path.join(model_path, 'IR', model+'_tf')
parser = TensorflowParser(arch_filename, weight_filename, ["OX_output"])
ops = parser.run(IR_filename)
op_sets[model] = ops
del parser
del TensorflowParser
# IR to Tensorflow
from ox.tensorflow.tensorflow_emitter import TensorflowEmitter
for model in test_models:
arch_filename = os.path.join(model_path, 'IR', model+'_tf.pb')
weight_filename = os.path.join(model_path, 'IR', model+'_tf.npy')
converted_file = os.path.join(model_path, 'tensorflow', model, model+'_ox')
emitter = TensorflowEmitter((arch_filename, weight_filename))
emitter.run(converted_file + '.py', None, 'test')
# test model
if 'resnet' in model:
img = image.resize((299, 299), Image.ANTIALIAS)
else:
img = image.resize((224, 224), Image.ANTIALIAS)
img = np.array(img)
x = np.expand_dims(img, axis=0)
if 'lstm' in model:
x = np.reshape(x, (-1, 224 * 224 * 3))
model_converted = imp.load_source('TFModel', converted_file + '.py').KitModel(weight_filename)
input_tf, model_tf = model_converted
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
predict = sess.run(model_tf, feed_dict = {input_tf : x})
del model_converted
del sys.modules['TFModel']
preds = np.squeeze(predict)
if 'fcn' in model: preds = np.array(preds)
import warnings
import numpy as np
import quaternionic
import pytest
# Array methods
def test_new(array):
q = array(1, 2, 3, 4)
assert q.dtype == np.float
assert q.shape == (4,)
assert q.w == 1.0 and q.x == 2.0 and q.y == 3.0 and q.z == 4.0
q = array([1, 2, 3, 4])
assert q.dtype == np.float
assert q.shape == (4,)
assert q.w == 1.0 and q.x == 2.0 and q.y == 3.0 and q.z == 4.0
q = array([[1, 2, 3, 4]])
assert q.dtype == np.float
assert q.shape == (1, 4)
assert q.w == 1.0 and q.x == 2.0 and q.y == 3.0 and q.z == 4.0
with pytest.raises(ValueError):
array(np.array(3.14))
with pytest.raises(ValueError):
array(np.array([]))
with pytest.raises(ValueError):
array(np.random.rand(4, 3))
def test_getitem(array):
q = array(np.random.normal(size=(17, 3, 4)))
p = q[1:-1]
assert isinstance(p, array)
assert p.shape == (q.shape[0]-2,) + q.shape[1:]
assert np.array_equal(p.ndarray, q.ndarray[1:-1])
with pytest.raises(ValueError):
q[..., 1:3]
def test_array_finalize(array):
q = array([1, 2, 3, 4])
with pytest.raises(ValueError):
q[1:3]
def test_repr(array):
q = array(np.random.normal(size=(17, 3, 4)))
assert repr(q) == 'quaternionic.' + repr(q.ndarray)
def test_str(array):
q = array(np.random.normal(size=(17, 3, 4)))
assert str(q) == str(q.ndarray)
def test_ones_like(Qs):
z = np.ones_like(Qs)
assert np.all(z.ndarray[:, 1:] == 0)
assert np.all(z.ndarray[:, 0] == 1)
import pandas as pd
import numpy as np
import os
from sim.Bus import Bus
from sim.Route import Route
from sim.Busstop import Bus_stop
from sim.Passenger import Passenger
import matplotlib.pyplot as plt
pd.options.mode.chained_assignment = None
def getBusRoute(data):
my_path = os.path.abspath(os.path.dirname(__file__))
path = my_path + "/data/" + data + "/"
_path_trips = path + 'trips.txt'
_path_st = path + 'stop_times.txt'
trips = pd.DataFrame(pd.read_csv(_path_trips))
stop_times = pd.DataFrame(pd.read_csv(_path_st))
stop_times.dropna(subset=['arrival_time'], inplace=True)
bus_routes = {}
trip_ids = set(stop_times['trip_id'])
try:
service_id = trips.iloc[np.random.randint(0, trips.shape[0])]['service_id']
trips = trips[trips['service_id'] == service_id]
except:
pass
# each route_id may correspond to multiple trip_id
for trip_id in trip_ids:
# A completely same route indicates the same shape_id in trip file, but this field is not 100% provided by opendata
try:
if 'shape_id' in trips.columns:
route_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['shape_id'])
block_id = ''
dir = ''
else:
route_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['route_id'])
block_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['block_id'])
dir = str(trips[trips['trip_id'] == trip_id].iloc[0]['trip_headsign'])
except:
continue
# Identifies a set of dates when service is available for one or more routes.
trip = stop_times[stop_times['trip_id'] == trip_id]
try:
trip['arrival_time'] = pd.to_datetime(trip['arrival_time'], format='%H:%M:%S')
except:
trip['arrival_time'] = pd.to_datetime(trip['arrival_time'], format="%Y-%m-%d %H:%M:%S")
trip = trip.sort_values(by='arrival_time')
trip_dist = trip.iloc[:]['shape_dist_traveled'].to_list()
if len(trip_dist) <= 0 or np.isnan(trip_dist[0]):
continue
schedule = ((trip.iloc[:]['arrival_time'].dt.hour * 60 + trip.iloc[:]['arrival_time'].dt.minute) * 60 +
trip.iloc[:]['arrival_time'].dt.second).to_list()
if len(schedule) <= 2 or np.isnan(schedule[0]):
continue
b = Bus(id=trip_id, route_id=route_id, stop_list=trip.iloc[:]['stop_id'].to_list(),
dispatch_time=schedule[0], block_id=block_id, dir=dir)
b.left_stop = []
b.speed = (trip_dist[1] - trip_dist[0]) / (schedule[1] - schedule[0])
b.c_speed = b.speed
for i in range(len(trip_dist)):
if str(b.stop_list[i]) in b.stop_dist:
b.left_stop.append(str(b.stop_list[i]) + '_' + str(i))
b.stop_dist[str(b.stop_list[i]) + '_' + str(i)] = trip_dist[i]
b.schedule[str(b.stop_list[i]) + '_' + str(i)] = schedule[i]
else:
b.left_stop.append(str(b.stop_list[i]))
b.stop_dist[str(b.stop_list[i])] = trip_dist[i]
b.schedule[str(b.stop_list[i])] = schedule[i]
b.stop_list = b.left_stop[:]
b.set()
if route_id in bus_routes:
bus_routes[route_id].append(b)
else:
bus_routes[route_id] = [b]
# Do not consider the route with only 1 trip
bus_routes_ = {}
for k, v in bus_routes.items():
if len(v) > 1:
bus_routes_[k] = v
return bus_routes_
def getStopList(data, read=0):
my_path = os.path.abspath(os.path.dirname(__file__))
path = my_path + "/data/" + data + "/"
_path_stops = path + 'stops.txt'
_path_st = path + 'stop_times.txt'
_path_trips = path + 'trips.txt'
stops = pd.DataFrame(pd.read_csv(_path_stops))
stop_times = pd.DataFrame(pd.read_csv(_path_st))
trips = pd.DataFrame(pd.read_csv(_path_trips))
stop_list = {}
select_stops = pd.merge(stops, stop_times, on=['stop_id'], how='left')
select_stops = select_stops.sort_values(by='shape_dist_traveled', ascending=False)
select_stops = select_stops.drop_duplicates(subset='stop_id', keep="first").sort_values(by='shape_dist_traveled',
ascending=True)
for i in range(select_stops.shape[0]):
stop = Bus_stop(id=str(select_stops.iloc[i]['stop_id']), lat=select_stops.iloc[i]['stop_lat'],
lon=select_stops.iloc[i]['stop_lon'])
stop.loc = select_stops.iloc[i]['shape_dist_traveled']
try:
stop.next_stop = str(select_stops.iloc[i + 1]['stop_id'])
except:
stop.next_stop = None
stop_list[str(select_stops.iloc[i]['stop_id'])] = stop
_path_demand = path + 'demand.csv'
pax_num = 0
try:
demand = pd.DataFrame(pd.read_csv(_path_demand))
except:
print('No available demand file')
return stop_list, 0
try:
demand['Ride_Start_Time'] = pd.to_datetime(demand['Ride_Start_Time'], format='%H:%M:%S')
except:
demand['Ride_Start_Time'] = pd.to_datetime(demand['Ride_Start_Time'], format="%Y-%m-%d %H:%M:%S")
demand['Ride_Start_Time_sec'] = (demand.iloc[:]['Ride_Start_Time'].dt.hour * 60 + demand.iloc[:][
'Ride_Start_Time'].dt.minute) * 60 + demand.iloc[:]['Ride_Start_Time'].dt.second
demand.dropna(subset=['ALIGHTING_STOP_STN'], inplace=True)
demand = demand[demand.ALIGHTING_STOP_STN != demand.BOARDING_STOP_STN]
demand = demand.sort_values(by='Ride_Start_Time_sec')
for stop_id, stop in stop_list.items():
demand_by_stop = demand[demand['BOARDING_STOP_STN'] == int(stop_id)]
# macro demand setting
if read == 0:
t = 0
while t < 24:
d = demand_by_stop[(demand_by_stop['Ride_Start_Time_sec'] >= t * 3600) & (
demand_by_stop['Ride_Start_Time_sec'] < (t + 1) * 3600)]
stop.dyna_arr_rate.append(d.shape[0] / 3600.)
for dest_id in stop_list.keys():
od = d[d['ALIGHTING_STOP_STN'] == int(dest_id)]
if od.empty:
continue
if dest_id not in stop.dest:
stop.dest[dest_id] = [0 for _ in range(24)]
stop.dest[dest_id][t] = od.shape[0] / 3600.
t += 1
else:
# micro demand setting
for i in range(demand_by_stop.shape[0]):
pax = Passenger(id=demand_by_stop.iloc[i]['TripID'], origin=stop_id,
plan_board_time=float(demand_by_stop.iloc[i]['Ride_Start_Time_sec']))
pax.dest = str(int(demand_by_stop.iloc[i]['ALIGHTING_STOP_STN']))
pax.realcost = float(demand_by_stop.iloc[i]['Ride_Time']) * 60.
pax.route = str(demand_by_stop.iloc[i]['Srvc_Number']) + '_' + str(
int(demand_by_stop.iloc[i]['Direction']))
stop.pax[pax.id] = pax
pax_num += 1
return stop_list, pax_num
def demand_analysis(engine=None):
if engine is not None:
stop_list = list(engine.busstop_list.keys())
stop_hash = {}
i = 0
for p in stop_list:
stop_hash[p] = i
i += 1
# output data for stack area graph
demand = []
for t in range(24):
d = np.zeros(len(stop_list))
for s in stop_list:
for pid, pax in engine.busstop_list[s].pax.items():
if int((pax.plan_board_time - 0) / 3600) == t:
d[stop_hash[s]] += 1
demand.append(d)
df = pd.DataFrame(demand, columns=[str(i) for i in range(len(stop_list))])
df.to_csv('demand.csv')
return
def sim_validate(engine, data):
actual_onboard = []
sim_onboard = []
sim_travel_cost = []
actual_travel_cost = []
for pid, pax in engine.pax_list.items():
actual_onboard.append(pax.plan_board_time)
sim_onboard.append(pax.onboard_time)
sim_travel_cost.append(abs(pax.onboard_time - pax.alight_time))
actual_travel_cost.append(pax.realcost)
actual_onboard = np.array(actual_onboard)
sim_onboard = np.array(sim_onboard)
actual_travel_cost = np.array(actual_travel_cost)
sim_travel_cost = np.array(sim_travel_cost)
print('Boarding RMSE:%g' % (np.sqrt(np.mean((actual_onboard - sim_onboard) ** 2))))
print('Travel RMSE:%g' % (np.sqrt(np.mean((actual_travel_cost - sim_travel_cost) ** 2))))
sim_comp = pd.DataFrame()
sim_comp['actual_onboard'] = actual_onboard
sim_comp['sim_onboard'] = sim_onboard
sim_comp['sim_travel_cost'] = sim_travel_cost
sim_comp['actual_travel_cost'] = actual_travel_cost
sim_comp.to_csv('G:\\mcgill\\MAS\\gtfs_testbed\\result\\sim_comp' + str(data) + '.csv')
print('ok')
def visualize_pax(engine):
for pax_id, pax in engine.pax_list.items():
if pax.onboard_time < 999999999:
plt.plot([int(pax_id), int(pax_id)], [pax.arr_time, pax.onboard_time])
plt.show()
def train_result_track(eng, ep, qloss_log, ploss_log, log, name='', seed=0):
reward_bus_wise = []
reward_bus_wisep1 = []
reward_bus_wisep2 = []
rs = []
wait_cost = log['wait_cost']
travel_cost = log['travel_cost']
delay = log['delay']
hold_cost = log['hold_cost']
headways_var = log['headways_var']
headways_mean = log['headways_mean']
AOD = log["AOD"]
for bid, r in eng.reward_signal.items():
if len(r) > 0: # .bus_list[bid].forward_bus!=None and engine.bus_list[bid].backward_bus!=None :
reward_bus_wise.append(np.mean(r))
rs += r
reward_bus_wisep1.append(np.mean(eng.reward_signalp1[bid]))
reward_bus_wisep2.append(np.mean(eng.reward_signalp2[bid]))
if ep % 1 == 0:
train_log = pd.DataFrame()
train_log['bunching'] = [log['bunching']]
train_log['ploss'] = [np.mean(ploss_log)]
train_log['qloss'] = [np.mean(qloss_log)]
train_log['reward'] = [np.mean(reward_bus_wise)]
train_log['reward1'] = [np.mean(reward_bus_wisep1)]
train_log['reward2'] = [np.mean(reward_bus_wisep2)]
train_log['avg_hold'] = np.mean(hold_cost)
train_log['action'] = np.mean(np.array(eng.action_record))
train_log['wait'] = [np.mean(wait_cost)]
train_log['travel'] = [np.mean(travel_cost)]
train_log['delay'] = [np.mean(delay)]
train_log['AOD'] = AOD
for k, v in headways_mean.items():
train_log['headway_mean' + str(k)] = [np.mean(v)]
for k, v in headways_var.items():
train_log['headway_var' + str(k)] = [np.mean(v)]
res = pd.DataFrame()
res['stw'] = log['stw']
res['sto'] = log['sto']
res['sth'] = log['sth']
print(
'Episode: %g | reward: %g | reward_var: %g | reward1: %g | reward2: %g | ploss: %g | qloss: %g |\n wait '
'cost: %g | travel cost: %g | max hold :%g| min hold :%g| avg hold :%g | var hold :%g' % (
                ep - 1, np.mean(reward_bus_wise), np.var(rs), np.mean(reward_bus_wisep1),
                np.mean(reward_bus_wisep2), np.mean(ploss_log), np.mean(qloss_log),
                np.mean(wait_cost), np.mean(travel_cost), np.max(hold_cost), np.min(hold_cost),
                np.mean(hold_cost), np.var(hold_cost)))
import numpy as np
def braille():
return {
'a' : np.array([[1, 0], [0, 0], [0, 0]], dtype=bool),
'b' : np.array([[1, 0], [1, 0], [0, 0]], dtype=bool),
'c' : np.array([[1, 1], [0, 0], [0, 0]], dtype=bool),
'd' : np.array([[1, 1], [0, 1], [0, 0]], dtype=bool),
'e' : np.array([[1, 0], [0, 1], [0, 0]], dtype=bool),
'f' : np.array([[1, 1], [1, 0], [0, 0]], dtype=bool),
'g' : np.array([[1, 1], [1, 1], [0, 0]], dtype=bool),
'h' : np.array([[1, 0], [1, 1], [0, 0]], dtype=bool),
'i' : np.array([[0, 1], [1, 0], [0, 0]], dtype=bool),
'j' : np.array([[0, 1], [1, 1], [0, 0]], dtype=bool),
'k' : np.array([[1, 0], [0, 0], [1, 0]], dtype=bool),
'l' : np.array([[1, 0], [1, 0], [1, 0]], dtype=bool),
'm' : np.array([[1, 1], [0, 0], [1, 0]], dtype=bool),
'n' : np.array([[1, 1], [0, 1], [1, 0]], dtype=bool),
'o' : np.array([[1, 0], [0, 1], [1, 0]], dtype=bool),
'p' : np.array([[1, 1], [1, 0], [1, 0]], dtype=bool),
'q' : np.array([[1, 1], [1, 1], [1, 0]], dtype=bool),
'r' : np.array([[1, 0], [1, 1], [1, 0]], dtype=bool),
's' : np.array([[0, 1], [1, 0], [1, 0]], dtype=bool),
't' : np.array([[0, 1], [1, 1], [1, 0]], dtype=bool),
'u' : np.array([[1, 0], [0, 0], [1, 1]], dtype=bool),
'v' : np.array([[1, 0], [1, 0], [1, 1]], dtype=bool),
'w' : np.array([[0, 1], [1, 1], [0, 1]], dtype=bool),
'x' : np.array([[1, 1], [0, 0], [1, 1]], dtype=bool),
'y' : np.array([[1, 1], [0, 1], [1, 1]], dtype=bool),
'z' : np.array([[1, 0], [0, 1], [1, 1]], dtype=bool),
'1' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 0], [0, 0], [0, 0]], dtype=bool)],
'2' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 0], [1, 0], [0, 0]], dtype=bool)],
'3' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 1], [0, 0], [0, 0]], dtype=bool)],
        '4' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 1], [0, 1], [0, 0]], dtype=bool)],
'5' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 0], [0, 1], [0, 0]], dtype=bool)],
'6' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 1], [1, 0], [0, 0]], dtype=bool)],
'7' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 1], [1, 1], [0, 0]], dtype=bool)],
'8' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[1, 0], [1, 1], [0, 0]], dtype=bool)],
'9' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[0, 1], [1, 0], [0, 0]], dtype=bool)],
        '0' : [np.array([[0, 1], [0, 1], [1, 1]], dtype=bool), np.array([[0, 1], [1, 1], [0, 0]], dtype=bool)],
    }
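# Minimal usage sketch (not part of the original file): letters map to a single
# 3x2 boolean cell and digits to a [number-sign cell, letter cell] pair, so a
# word can be rendered by horizontally stacking the cells of each character.
def braille_word(word):
    cells = []
    alphabet = braille()
    for ch in word.lower():
        glyph = alphabet[ch]
        cells.extend(glyph if isinstance(glyph, list) else [glyph])
    return np.hstack(cells)
# braille_word('abc').shape -> (3, 6); braille_word('42').shape -> (3, 8)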
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes of polling so far (one request every 3 s)
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
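# Example request (commented out; the dates are illustrative only -- the stream
# name is one of those defined in M2M_URLs below):
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.000Z')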
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to collect the data files from the THREDDS catalog.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF data files in the THREDDS catalog that match the regex tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
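# Example (illustrative tag): keep only the instrument NetCDF files, skipping
# any ancillary products that share the same THREDDS catalog.
#   files = list_files(url, tag='.*METBK.*\\.nc$')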
def M2M_Data(nclist, variables):
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may contain more than one URL; append the records from each file
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the catalog prefix so the path can be appended to the DAP base URL
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # the time record is seconds since 1900-01-01; convert to days, then to datetimes
    tmp = variables[0].data / 60 / 60 / 24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
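# End-to-end sketch (commented out; assumes `data` from M2M_Call above and a
# var_list populated by M2M_URLs below):
#   files = M2M_Files(data, tag='.*METBK.*\\.nc$')
#   var_list, time_converted = M2M_Data(files, var_list)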
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
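# Usage sketch: indexing one slot past the current length auto-appends a fresh
# `var`, which is how M2M_URLs below fills var_list[0], var_list[1], ... in order.
#   vl = structtype()
#   vl[0].name = 'time'   # first access creates slot 0
#   len(vl)               # -> 1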
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
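# PCO2W: seawater pCO2 instrument; each branch parses thermistor temperature (degC) and seawater pCO2 (uatm).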
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
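# PHSEN: seawater pH instrument; each branch parses thermistor temperature (degC) and pH (unitless).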
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
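# SPKIR: downwelling spectral irradiance; the single data variable is the multi-wavelength vector in uW cm-2 nm-1.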
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
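# PRESF: seafloor pressure (tide measurements); the sensor series letter varies by site (PRESFA/B/C000).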
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
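# CTDBP: moored pumped CTD; each branch parses temperature, salinity, density, pressure, and conductivity.
# Note that the CE09OSSM MFN branch references a different sensor series (03-CTDBPE000).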
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
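# VEL3D: 3-D single-point turbulent velocity meter on the seafloor (MFN) frames; vel3d_cd streams include seawater pressure.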
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
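# VEL3DK: 3-D velocity meter on the CE09OSPM wire-following profiler; vel3d_k streams include heading/pitch/roll (ddegrees) and CTD pressure.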
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
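# PCO2A: air-sea pCO2; each branch parses surface-seawater and atmospheric pCO2 (uatm) plus the derived CO2 flux (mol m-2 s-1).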
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
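# PARAD: photosynthetically available radiation on the profiler; the double underscore in the stream name is preserved as-is.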
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
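# OPTAA: optical absorption/attenuation spectrophotometer; only the timestamp is requested here, not the spectral products.
# Note that the CE09OSSM MFN branch references a different sensor series (01-OPTAAC000).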
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
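# NUTNR: nitrate concentration (SUNA); note the telemetered stream name retains the '_recovered' suffix.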
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## Recovered-host data streams (method == 'RecoveredHost') follow.
#MOPAK
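# MOPAK: buoy 3-axis motion package; only the timestamp is requested here.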
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
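# METBK: bulk meteorology package; branches parse SST, conductivity, salinity, corrected winds, pressure, air temperature,
# humidity, radiation, precipitation, derived minute heat-flux terms, surface currents, and specific humidity.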
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
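# FLORT: three-wavelength fluorometer; branches parse scattering, chlorophyll-a, CDOM, volume scattering, and optical backscatter.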
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
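# FDCHP: direct covariance air-sea flux package; only the time coordinate is requested from this stream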
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
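# DOSTA: dissolved oxygen optodes; NSIF branches expose optode temperature and two oxygen estimates, MFN branches only the dissolved/optode oxygen pair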
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
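# ADCP: acoustic Doppler current profilers; earth-referenced velocity components per depth bin plus instrument attitude (heading/pitch/roll in deci-degrees)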
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
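# WAVSS: surface wave statistics from the buoy-mounted sensor; wave heights in m, periods in sec, mean direction/spread in degrees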
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
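# VELPT: single-point velocity meters; velocities in m/s, with attitude, temperature, and pressure kept in raw instrument scalings (deci-degrees, 0.01 degC, 0.001 dbar)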
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
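# NOTE: unlike the other BUOY VELPT branches (SBD11/SBD17 nodes), this branch requests the RID16 (NSIF) sensor; the dcl-parsed stream below is kept as a commented-out alternative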
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
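# PCO2W: seawater pCO2 sensors; thermistor temperature (degC) and pCO2 (uatm)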
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
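# PHSEN: seawater pH sensors; thermistor temperature (degC) and pH (unitless)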
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
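# SPKIR: spectral irradiance; multi-wavelength downwelling irradiance vector (uW cm-2 nm-1)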
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
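# PRESF: seafloor pressure recorders; absolute tide pressure (dbar) and seawater temperature (degC)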
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
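# CTDBP: pumped CTDs; temperature, practical salinity, density, pressure, and conductivity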
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
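# Each elif branch below follows the same pattern: the
# (platform_name, node, instrument_class, method) tuple selects a uFrame
# reference-designator/stream path, and the branch pre-declares that stream's
# variable names, empty data arrays (np.array([])), and unit strings,
# presumably for the data request elsewhere in this module to fill.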
#VEL3D (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A (RecoveredHost)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA (RecoveredHost: these streams declare only the time coordinate)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
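#CTD (RecoveredInst / RecoveredWFP)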
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
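#ADCP (RecoveredInst)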
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
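#ZPLSC (RecoveredInst: echogram streams declare only the time coordinate)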
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
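#VELPT (RecoveredInst)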
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
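#VEL3D (RecoveredWFP / RecoveredInst)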
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
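#PRESF (RecoveredInst)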
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
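#PHSEN (RecoveredInst)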
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
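# PCO2W seawater pCO2 sensors (recovered-instrument); pco2_seawater is reported in uatm.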
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
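# PARAD photosynthetically available radiation on the CE09OSPM wire-following profiler.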
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
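# NUTNR (SUNA) nitrate sensors: raw and salinity-corrected concentrations, both in umol/L.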
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
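# FDCHP direct-covariance flux package: only the time base is requested from this stream.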
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
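# FLORT fluorometer/backscatter triplets; the profiler variant also carries the
# interpolated CTD pressure record.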
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
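# DOSTA/DOFST dissolved oxygen. The profiler DOFST branch returns both the L2
# product (umol/kg) and the raw SBE43F frequency output (Hz); the moored
# branches pull oxygen from the co-located CTDBP instrument stream.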
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
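# Wave statistics (WAVSS_Stats class) served from the ADCPT-M log9 recovered
# stream on the ISSM MFNs.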
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
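# Cabled benthic experiment package (BEP) streamed data: CE02SHBP and CE04OSBP.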
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
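# Streamed DOSTA at the BEPs: the oxygen variables come from the co-located
# CTDBP stream (note the ctdbp_no_sample dataset path).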
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
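# Streamed OPTAA spectrophotometers: only the time base is requested here.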
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
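# CSPP CTD profiles: temperature, salinity, density, pressure, and conductivity.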
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
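# CSPP VELPT point velocity meters: 3-D velocity plus instrument attitude
# (heading/roll/pitch), temperature, and interpolated CTD pressure.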
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
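# Shelf surface-piercing profilers (CE02SHSP, CE07SHSP), recovered-CSPP streams below.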
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
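# VELPT CSPP streams: point velocities (east/north/up) plus instrument
# attitude (heading, roll, pitch), temperature, and CTD pressure.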
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
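# OPTAA (optical absorption/attenuation) CSPP streams: only time and the
# co-located CTD pressure are mapped here; the spectral products are not
# requested by this branch.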
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
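# CE05MOAS coastal glider section. Each glider serial (GL386, GL384, GL383,
# GL382, GL381, GL327, GL326, GL320, GL319, GL312, GL311, GL247) gets one
# near-identical branch per delivery method (Telemetered / RecoveredHost).
# CTDGV streams: temperature, salinity, density, pressure, conductivity,
# plus the glider's lat/lon fix.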
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
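# Glider DOSTA (dissolved oxygen) streams: oxygen reported in both umol/L
# and umol/kg, with the co-located CTD pressure and glider position.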
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
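# Glider FLORT (ECO triplet fluorometer) streams: chlorophyll, CDOM,
# optical backscatter, and the seawater scattering coefficient, with
# pressure and position.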
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
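# --- PARAD: photosynthetically available radiation (PAR) from the CE05MOAS
# glider fleet. As with the FLORT branches above, the platforms (GL386 down
# to GL247) share identical variable names and units; only the glider number
# and the delivery method (telemetered vs. recovered_host, which also selects
# the parad_m_glider_instrument vs. parad_m_glider_recovered stream) differ.
#
# A minimal, hypothetical refactor sketch (not part of this script): because
# the branches vary only in those two fields, the dataset path could be built
# from a template instead of one elif per platform, e.g.
#   path = 'CE05MOAS/{gl}/01-PARADM000/{method}/parad_m_glider_{suffix}'
#   uframe_dataset_name = path.format(gl='GL386', method='telemetered',
#                                     suffix='instrument')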
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
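# --- ADCP: glider-mounted acoustic Doppler current profiler
# (adcp_velocity_glider stream). Note that only RecoveredHost branches are
# defined for the glider ADCPs below; each reports the velocity bin depths,
# vehicle attitude (heading/pitch/roll), and the three seawater velocity
# components, alongside pressure and position.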
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
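# --- METBK1-hr: hourly bulk meteorology products (metbk_hourly stream) from
# the surface-mooring buoys. met_timeflx is the timestamp of the hourly flux
# window; the met_*flx variables are bulk air-sea flux estimates (W/m2 for
# the heat terms, N/m2 for momentum, mm/hr for freshwater), with derived 2-m
# air temperature/humidity, skin temperature, and 10-m wind speed.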
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
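#Coastal Endurance WAVSS Mean Directional Wave Data Streams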
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
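#Coastal Endurance WAVSS Non-Directional Wave Data Streams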
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
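#Coastal Endurance WAVSS Buoy Motion Data Streams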
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
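#Coastal Endurance WAVSS Fourier Coefficient Data Streams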
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
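#Cabled Profiler Data Streams (CE04OSPS shallow profiler, CE04OSPD deep profiler)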
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
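#CE04OSPS 200m Platform Data Streams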
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #the same pressure variable serves the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
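#METBK1 and METBK2 carry the same 19-variable bulk meteorology list; on
#CP01CNSM they map to nodes SBD11 and SBD12 respectively. A caller selects a
#branch by supplying platform_name, node, instrument_class, and method, e.g.
#('CP01CNSM', 'BUOY', 'METBK1', 'Telemetered') for the SBD11 stream.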
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
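#WAVSS (surface wave spectra) on CP01CNSM SBD12 exposes five stream types:
#Stats, MeanDir, NonDir, Motion, and Fourier, each with telemetered and
#recovered_host variants.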
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
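#PCO2A telemetered water streams for the three Pioneer surface moorings; the
#variable set (pCO2 of surface seawater, pCO2 of air, and the air-sea CO2
#flux) is identical on each mooring.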
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A recovered_host
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
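#FDCHP streams map only the time coordinate here; no science variables are
#requested.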
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
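#metbk_hourly: hourly derived products (rain rate, buoyancy/heat/moisture
#fluxes, 10m wind, etc.) computed from the bulk met data; note the time
#coordinate is 'met_timeflx' rather than 'time'.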
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
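#CTDBP streams: the dcl paths (telemetered and recovered_host) use the
#generic names 'temp', 'pressure', and 'conductivity', while recovered_inst
#uses the 'ctdbp_seawater_*' names; units are the same for all three methods.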
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
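# MFN-mounted CTDs (seafloor multi-function node, MFD37): same ctdbp_cdef variable set as the NSIF units above.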
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
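# OPTAA (optical absorption/attenuation) streams: only the time coordinate is predeclared here.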
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
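# VELPT point-velocity meters on the near-surface instrument frame (NSIF): velocity components plus heading/roll/pitch, temperature, and pressure in instrument-native units.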
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
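# FLORT fluorometer/backscatter sensors on the NSIF: chlorophyll-a, CDOM, and optical scattering products.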
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
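# SPKIR spectral irradiance on the NSIF: all wavelengths are returned in a single downwelling vector variable.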
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
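# DOSTA dissolved-oxygen optodes on the NSIF.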
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
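# PHSEN seawater-pH sensors on the NSIF.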
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
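# PHSEN seawater-pH sensors on the seafloor multi-function node (MFN, MFD35).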
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
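# PCO2W seawater-pCO2 sensors on the MFN.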
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
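# PRESF seafloor pressure/tide sensors on the MFN; note the recovered_inst streams use the presf_tide_* variable names instead of abs_seafloor_pressure.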
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
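# VELPT point-velocity meters on the MFN; CP04OSSM carries the 04-VELPTB000 designator where the other moorings use 04-VELPTA000.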
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
    # Coastal Pioneer Wire-Following Profilers (WFP)
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
        var_list[0].data = np.array([])
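# --- Illustrative sketch (not part of the original module) ---
# The if/elif chain above repeats identical per-instrument variable tables for
# every platform/node/method combination. A small table-driven helper can
# express each instrument's variables once; VELPT_SPEC and build_var_list are
# hypothetical names introduced only for this sketch.
VELPT_SPEC = [
    ('time', 'seconds since 1900-01-01'),
    ('eastward_velocity', 'm/s'),
    ('northward_velocity', 'm/s'),
    ('upward_velocity', 'm/s'),
    ('heading_decidegree', 'deci-degrees'),
    ('roll_decidegree', 'deci-degrees'),
    ('pitch_decidegree', 'deci-degrees'),
    ('temperature_centidegree', '0.01degC'),
    ('pressure_mbar', '0.001dbar'),
]

def build_var_list(var_list, spec):
    """Fill variable names/units from a spec table and reset the data arrays."""
    for i, (name, units) in enumerate(spec):
        var_list[i].name = name
        var_list[i].units = units
        var_list[i].data = np.array([])
    return var_list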
import numpy as np
class SECS:
"""Spherical Elementary Current System (SECS).
The algorithm is implemented directly in spherical coordinates
from the equations of the 1999 Amm & Viljanen paper [1]_.
Parameters
----------
sec_df_loc : ndarray (nsec x 3 [lat, lon, r])
        The latitude, longitude, and radius of the divergence free (df) SEC locations.
    sec_cf_loc : ndarray (nsec x 3 [lat, lon, r])
        The latitude, longitude, and radius of the curl free (cf) SEC locations.
References
----------
    .. [1] Amm, O., and Viljanen, A. "Ionospheric disturbance magnetic field continuation
from the ground to the ionosphere using spherical elementary current systems."
Earth, Planets and Space 51.6 (1999): 431-440. doi:10.1186/BF03352247
"""
def __init__(self, sec_df_loc=None, sec_cf_loc=None):
if sec_df_loc is None and sec_cf_loc is None:
raise ValueError("Must initialize the object with SEC locations")
self.sec_df_loc = sec_df_loc
self.sec_cf_loc = sec_cf_loc
if self.sec_df_loc is not None:
self.sec_df_loc = np.asarray(sec_df_loc)
if self.sec_df_loc.shape[-1] != 3:
raise ValueError("SEC DF locations must have 3 columns (lat, lon, r)")
if self.sec_df_loc.ndim == 1:
# Add an empty dimension if only one SEC location is passed in
self.sec_df_loc = self.sec_df_loc[np.newaxis, ...]
if self.sec_cf_loc is not None:
self.sec_cf_loc = np.asarray(sec_cf_loc)
if self.sec_cf_loc.shape[-1] != 3:
raise ValueError("SEC CF locations must have 3 columns (lat, lon, r)")
if self.sec_cf_loc.ndim == 1:
# Add an empty dimension if only one SEC location is passed in
self.sec_cf_loc = self.sec_cf_loc[np.newaxis, ...]
# Storage of the scaling factors
self.sec_amps = None
self.sec_amps_var = None
@property
def has_df(self):
"""Whether this system has any divergence free currents."""
return self.sec_df_loc is not None
@property
def has_cf(self):
"""Whether this system has any curl free currents."""
return self.sec_cf_loc is not None
@property
def nsec(self):
"""The number of elementary currents in this system."""
nsec = 0
if self.has_df:
nsec += len(self.sec_df_loc)
if self.has_cf:
nsec += len(self.sec_cf_loc)
return nsec
def fit(self, obs_loc, obs_B, obs_std=None, epsilon=0.05):
"""Fits the SECS to the given observations.
Given a number of observation locations and measurements,
this function fits the SEC system to them. It uses singular
value decomposition (SVD) to fit the SEC amplitudes with the
`epsilon` parameter used to regularize the solution.
Parameters
----------
obs_locs : ndarray (nobs x 3 [lat, lon, r])
Contains latitude, longitude, and radius of the observation locations
(place where the measurements are made)
obs_B: ndarray (ntimes x nobs x 3 [Bx, By, Bz])
An array containing the measured/observed B-fields.
obs_std : ndarray (ntimes x nobs x 3 [varX, varY, varZ]), optional
Standard error of vector components at each observation location.
This can be used to weight different observations more/less heavily.
An infinite value eliminates the observation from the fit.
Default: ones(nobs x 3) equal weights
epsilon : float
Value used to regularize/smooth the SECS amplitudes. Multiplied by the
largest singular value obtained from SVD.
Default: 0.05
"""
if obs_loc.shape[-1] != 3:
raise ValueError("Observation locations must have 3 columns (lat, lon, r)")
if obs_B.ndim == 2:
# Just a single snapshot given, so expand the dimensionality
obs_B = obs_B[np.newaxis, ...]
# Assume unit standard error of all measurements
if obs_std is None:
obs_std = np.ones(obs_B.shape)
ntimes = len(obs_B)
# Calculate the transfer functions
T_obs = self._calc_T(obs_loc)
# Store the fit sec_amps in the object
self.sec_amps = np.empty((ntimes, self.nsec))
self.sec_amps_var = np.empty((ntimes, self.nsec))
# Calculate the singular value decomposition (SVD)
# NOTE: T_obs has shape (nobs, 3, nsec), we reshape it
# to (nobs*3, nsec); obs_std has shape (ntimes, nobs, 3),
# we reshape it to (ntimes, nobs*3), then loop over ntimes
# to solve using (potentially) time-dependent observation
# standard errors to weight the observations
for i in range(ntimes):
# Only (re-)calculate SVD when necessary
if i == 0 or not np.all(obs_std[i] == obs_std[i-1]):
# Weight T_obs with obs_std
svd_in = (T_obs.reshape(-1, self.nsec) /
obs_std[i].ravel()[:, np.newaxis])
                # Find singular value decomposition
U, S, Vh = np.linalg.svd(svd_in, full_matrices=False)
# Eliminate singular values less than epsilon by setting their
                # reciprocal to zero (setting S to infinity first avoids
                # divide-by-zero warnings)
S[S < epsilon * S.max()] = np.inf
W = 1./S
# Update VWU if obs_std changed
VWU = Vh.T @ (np.diag(W) @ U.T)
# Solve for SEC amplitudes and error variances
# shape: ntimes x nsec
self.sec_amps[i, :] = (VWU @ (obs_B[i] / obs_std[i]).reshape(-1).T).T
# Maybe we want the variance of the predictions sometime later...?
# shape: ntimes x nsec
valid = np.isfinite(obs_std[i].reshape(-1))
self.sec_amps_var[i, :] = np.sum(
(VWU[:,valid] * obs_std[i].reshape(-1)[valid])**2,
axis=1)
return self
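    # Illustrative usage (a sketch, not part of the original class): place
    # divergence-free SECs on an ionospheric shell, fit them to observed
    # magnetic field vectors, then predict elsewhere. The inputs obs_loc,
    # obs_B and pred_loc are hypothetical arrays in the shapes documented
    # above.
    #
    #   r_iono = 6371e3 + 110e3   # SEC shell at ~110 km altitude [m]
    #   lat, lon = np.meshgrid(np.arange(50., 61., 2.), np.arange(0., 21., 2.))
    #   sec_locs = np.column_stack([lat.ravel(), lon.ravel(),
    #                               np.full(lat.size, r_iono)])
    #   secs = SECS(sec_df_loc=sec_locs)
    #   secs.fit(obs_loc, obs_B, epsilon=0.05)
    #   B_pred = secs.predict(pred_loc)   # shape (ntimes x npred x 3)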
def fit_unit_currents(self):
"""Sets all SECs to a unit current amplitude."""
self.sec_amps = np.ones((1, self.nsec))
return self
def predict(self, pred_loc, J=False):
"""Calculate the predicted magnetic field or currents.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
J: boolean
Whether to predict currents (J=True) or magnetic fields (J=False)
Default: False (magnetic field prediction)
Returns
-------
ndarray (ntimes x npred x 3 [lat, lon, r])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
if pred_loc.shape[-1] != 3:
raise ValueError("Prediction locations must have 3 columns (lat, lon, r)")
if self.sec_amps is None:
raise ValueError("There are no currents associated with the SECs," +
"you need to call .fit() first to fit to some observations.")
# T_pred shape=(npred x 3 x nsec)
# sec_amps shape=(nsec x ntimes)
if J:
# Predicting currents
T_pred = self._calc_J(pred_loc)
else:
# Predicting magnetic fields
T_pred = self._calc_T(pred_loc)
# NOTE: dot product is slow on multi-dimensional arrays (i.e. > 2 dimensions)
# Therefore this is implemented as tensordot, and the arguments are
# arranged to eliminate needs of transposing things later.
# The dot product is done over the SEC locations, so the final output
# is of shape: (ntimes x npred x 3)
return np.squeeze(np.tensordot(self.sec_amps, T_pred, (1, 2)))
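    # Shape bookkeeping (illustrative): with ntimes=5, nsec=100 and npred=7,
    # sec_amps is (5, 100) and T_pred is (7, 3, 100); tensordot contracts the
    # SEC axis (axis 1 of sec_amps with axis 2 of T_pred), giving (5, 7, 3),
    # which np.squeeze reduces when ntimes == 1.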
def predict_B(self, pred_loc):
"""Calculate the predicted magnetic fields.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
Returns
-------
ndarray (ntimes x npred x 3 [lat, lon, r])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
return self.predict(pred_loc)
def predict_J(self, pred_loc):
"""Calculate the predicted currents.
After a set of observations has been fit to this system we can
predict the magnetic fields or currents at any other location. This
function uses those fit amplitudes to predict at the requested locations.
Parameters
----------
pred_loc: ndarray (npred x 3 [lat, lon, r])
An array containing the locations where the predictions are desired.
Returns
-------
ndarray (ntimes x npred x 3 [lat, lon, r])
The predicted values calculated from the current amplitudes that were
fit to this system.
"""
return self.predict(pred_loc, J=True)
def _calc_T(self, obs_loc):
"""Calculates the T transfer matrix.
The magnetic field transfer matrix to go from SEC locations to observation
locations. It assumes unit current amplitudes that will then be
scaled with the proper amplitudes later.
"""
if self.has_df:
T = T_df(obs_loc=obs_loc, sec_loc=self.sec_df_loc)
if self.has_cf:
T1 = T_cf(obs_loc=obs_loc, sec_loc=self.sec_cf_loc)
# df is already present in T
if self.has_df:
T = np.concatenate([T, T1], axis=2)
else:
T = T1
return T
def _calc_J(self, obs_loc):
"""Calculates the J transfer matrix.
The current transfer matrix to go from SEC locations to observation
locations. It assumes unit current amplitudes that will then be
scaled with the proper amplitudes later.
"""
if self.has_df:
J = J_df(obs_loc=obs_loc, sec_loc=self.sec_df_loc)
if self.has_cf:
J1 = J_cf(obs_loc=obs_loc, sec_loc=self.sec_cf_loc)
            # df is already present in J
if self.has_df:
J = np.concatenate([J, J1], axis=2)
else:
J = J1
return J
def T_df(obs_loc, sec_loc):
"""Calculates the divergence free magnetic field transfer function.
The transfer function goes from SEC location to observation location
and assumes unit current SECs at the given locations.
Parameters
----------
obs_loc : ndarray (nobs, 3 [lat, lon, r])
The locations of the observation points.
sec_loc : ndarray (nsec, 3 [lat, lon, r])
The locations of the SEC points.
Returns
-------
ndarray (nobs, 3, nsec)
The T transfer matrix.
"""
nobs = len(obs_loc)
nsec = len(sec_loc)
obs_r = obs_loc[:, 2][:, np.newaxis]
sec_r = sec_loc[:, 2][np.newaxis, :]
theta = calc_angular_distance(obs_loc[:, :2], sec_loc[:, :2])
alpha = calc_bearing(obs_loc[:, :2], sec_loc[:, :2])
# magnetic permeability
mu0 = 4*np.pi*1e-7
# simplify calculations by storing this ratio
x = obs_r/sec_r
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
factor = 1./np.sqrt(1 - 2*x*cos_theta + x**2)
# Amm & Viljanen: Equation 9
Br = mu0/(4*np.pi*obs_r) * (factor - 1)
# Amm & Viljanen: Equation 10 (transformed to try and eliminate trig operations and
# divide by zeros)
Btheta = -mu0/(4*np.pi*obs_r) * (factor*(x - cos_theta) + cos_theta)
# If sin(theta) == 0: Btheta = 0
# There is a possible 0/0 in the expansion when sec_loc == obs_loc
Btheta = np.divide(Btheta, sin_theta, out=np.zeros_like(sin_theta),
where=sin_theta != 0)
# When observation points radii are outside of the sec locations
under_locs = sec_r < obs_r
# NOTE: If any SECs are below observations the math will be done on all points.
# This could be updated to only work on the locations where this condition
# occurs, but would make the code messier, with minimal performance gain
# except for very large matrices.
if np.any(under_locs):
# Flipped from previous case
x = sec_r/obs_r
# Amm & Viljanen: Equation A.7
Br2 = mu0*x/(4*np.pi*obs_r) * (1./np.sqrt(1 - 2*x*cos_theta + x**2) - 1)
# Amm & Viljanen: Equation A.8
Btheta2 = - mu0 / (4*np.pi*obs_r) * ((obs_r-sec_r*cos_theta) /
np.sqrt(obs_r**2 -
2*obs_r*sec_r*cos_theta +
sec_r**2) - 1)
Btheta2 = np.divide(Btheta2, sin_theta, out=np.zeros_like(sin_theta),
where=sin_theta != 0)
# Update only the locations where secs are under observations
Btheta[under_locs] = Btheta2[under_locs]
Br[under_locs] = Br2[under_locs]
# Transform back to Bx, By, Bz at each local point
    T = np.empty((nobs, 3, nsec))
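    # Plausible completion (a sketch; the sign convention is an assumption
    # based on the Amm & Viljanen geometry used above): rotate the local
    # (Br, Btheta) components into geographic Bx/By/Bz via the bearing alpha,
    # then return the transfer matrix in the documented shape.
    T[:, 0, :] = -Btheta * np.sin(alpha)
    T[:, 1, :] = -Btheta * np.cos(alpha)
    T[:, 2, :] = -Br
    return T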
"""
Procedures needed for variable importance estimation.
Created on Thu Dec 8 09:11:57 2020.
@author: MLechner
# -*- coding: utf-8 -*-
"""
from concurrent import futures
import math
import numpy as np
import ray
from mcf import mcf_forest_functions as mcf_forest
from mcf import mcf_data_functions as mcf_data
from mcf import general_purpose as gp
from mcf import general_purpose_system_files as gp_sys
from mcf import general_purpose_mcf as gp_mcf
def variable_importance(indatei, forest, v_dict, v_x_type, v_x_values,
c_dictin, x_name_mcf, regrf=False):
"""Compute variable importance measure.
Parameters
----------
indatei: Str.
forest : List of list. Estimated forest.
v_dict : DICT. Variables.
v_x_type : List of Int.
v_x_values : List of Int.
c_dictin : DICT. Parameters
x_name_mcf : List. Variable names from MCF procedure
regrf : Bool.
Returns
-------
vim : Dictionary. Variable importance measures and names of variable
Procedure:
a) Predict Total of OOB of estimated forest_est (should already be there)
b) For each variable, randomize one or groups of covariates
c) recompute OOB-MSE with the ys of these more noisy variables
    Use multiprocessing in new OOB prediction in the same way as in forest
    building.
Outer loop over variables and group of variables
Inner loop: Loop over trees
Initially take out the indices of leaf 0 [16]--> OOB data
    For every observation determine its terminal leaf
- save index together with number of terminal leaf
For all terminal leaves compute OOB prediction
"""
if c_dictin['with_output'] and c_dictin['verbose']:
print('\nVariable importance measures (OOB data)')
print('\nSingle variables')
(x_name, _, _, c_dict, _, data_np, y_i, y_nn_i, x_i, _, _, d_i, w_i, _
) = mcf_data.prepare_data_for_forest(indatei, v_dict, v_x_type,
v_x_values, c_dictin, regrf=regrf)
no_of_vars = len(x_name)
partner_k = determine_partner_k(x_name)
# Loop over all variables to get respective OOB values of MSE
if x_name != x_name_mcf:
raise Exception('Wrong order of variable names', x_name, x_name_mcf)
number_of_oobs = 1 + no_of_vars
oob_values = [None] * number_of_oobs
if c_dict['no_parallel'] < 1.5:
maxworkers = 1
else:
if c_dict['mp_automatic']:
maxworkers = gp_mcf.find_no_of_workers(c_dict['no_parallel'],
c_dict['sys_share'])
else:
maxworkers = c_dict['no_parallel']
if c_dict['with_output'] and c_dict['verbose']:
print('Number of parallel processes: ', maxworkers)
if c_dict['mp_with_ray'] and maxworkers > 1:
if c_dict['mem_object_store_2'] is None:
ray.init(num_cpus=maxworkers, include_dashboard=False)
else:
ray.init(num_cpus=maxworkers, include_dashboard=False,
object_store_memory=c_dict['mem_object_store_2'])
if c_dict['with_output'] and c_dict['verbose']:
print("Size of Ray Object Store: ",
round(c_dict['mem_object_store_2']/(1024*1024)), " MB")
data_np_ref = ray.put(data_np)
forest_ref = ray.put(forest)
if (c_dict['mp_type_vim'] == 2 and not c_dict['mp_with_ray']) or (
maxworkers == 1):
for jdx in range(number_of_oobs):
oob_values[jdx], _ = get_oob_mcf(
data_np, y_i, y_nn_i, x_i, d_i, w_i, c_dict, jdx, True, [],
forest, False, regrf, partner_k[jdx])
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, number_of_oobs)
else: # Fast but needs a lot of memory because it copied a lot
maxworkers = min(maxworkers, number_of_oobs)
if c_dict['mp_with_ray']:
tasks = [ray_get_oob_mcf.remote(
data_np_ref, y_i, y_nn_i, x_i, d_i, w_i, c_dict, idx, True, [],
forest_ref, True, regrf, partner_k[idx])
for idx in range(number_of_oobs)]
still_running = list(tasks)
jdx = 0
while len(still_running) > 0:
finished, still_running = ray.wait(still_running)
finished_res = ray.get(finished)
for res in finished_res:
iix = res[1]
oob_values[iix] = res[0]
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, number_of_oobs)
jdx += 1
else:
with futures.ProcessPoolExecutor(max_workers=maxworkers) as fpp:
ret_fut = {fpp.submit(
get_oob_mcf, data_np, y_i, y_nn_i, x_i, d_i, w_i, c_dict,
idx, True, [], forest, True, regrf, partner_k[idx]):
idx for idx in range(number_of_oobs)}
for jdx, frv in enumerate(futures.as_completed(ret_fut)):
results_fut = frv.result()
del ret_fut[frv]
del frv
iix = results_fut[1]
oob_values[iix] = results_fut[0]
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, number_of_oobs)
oob_values = np.array(oob_values)
if regrf:
oob_values = oob_values.reshape(-1)
mse_ref = oob_values[0] # reference value
vim = vim_print(mse_ref, oob_values[1:], x_name, 0, c_dict['with_output'],
True, partner_k)
# Variables are grouped
no_g, no_m_g = number_of_groups_vi(no_of_vars)
partner_k = None
if no_g > 0:
if c_dict['with_output']:
print('\nGroups of variables')
ind_groups = vim_grouping(vim, no_g)
n_g = len(ind_groups)
oob_values = [None] * n_g
if c_dict['mp_with_ray'] and maxworkers > 1:
tasks = [ray_get_oob_mcf.remote(
data_np_ref, y_i, y_nn_i, x_i, d_i, w_i, c_dict, idx, False,
ind_groups, forest_ref, True, regrf, partner_k)
for idx in range(n_g)]
still_running = list(tasks)
idx = 0
while len(still_running) > 0:
finished, still_running = ray.wait(still_running)
finished_res = ray.get(finished)
for res in finished_res:
iix = res[1]
oob_values[iix] = res[0]
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(idx+1, n_g)
idx += 1
else:
for idx in range(n_g):
oob_values[idx], _ = get_oob_mcf(
data_np, y_i, y_nn_i, x_i, d_i, w_i, c_dict, idx, False,
ind_groups, forest, False, regrf, partner_k)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(idx+1, n_g)
if regrf:
oob_values = np.array(oob_values)
oob_values = oob_values.reshape(-1)
vim_g = vim_print(mse_ref, np.array(oob_values), x_name, ind_groups,
c_dict['with_output'], False)
else:
vim_g = None
# Groups are accumulated from worst to best
if no_m_g > 0:
if c_dict['with_output']:
print('\nMerged groups of variables')
ind_groups = vim_grouping(vim_g, no_m_g, True)
n_g = len(ind_groups)
oob_values = [None] * n_g
if c_dict['mp_with_ray'] and maxworkers > 1:
tasks = [ray_get_oob_mcf.remote(
data_np_ref, y_i, y_nn_i, x_i, d_i, w_i, c_dict, idx, False,
ind_groups, forest_ref, True, regrf, partner_k)
for idx in range(n_g)]
still_running = list(tasks)
idx = 0
while len(still_running) > 0:
finished, still_running = ray.wait(still_running)
finished_res = ray.get(finished)
for res in finished_res:
iix = res[1]
oob_values[iix] = res[0]
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(idx+1, n_g)
idx += 1
else:
for idx in range(n_g):
oob_values[idx], _ = get_oob_mcf(
data_np, y_i, y_nn_i, x_i, d_i, w_i, c_dict, idx, False,
ind_groups, forest, False, regrf, partner_k)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(idx+1, n_g)
if regrf:
oob_values = np.array(oob_values)
oob_values = oob_values.reshape(-1)
vim_mg = vim_print(mse_ref, np.array(oob_values), x_name, ind_groups,
c_dict['with_output'], False)
else:
vim_mg = None
if c_dict['mp_with_ray']:
del finished, still_running, data_np_ref, forest_ref, tasks
ray.shutdown()
return vim, vim_g, vim_mg, x_name
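# Illustrative sketch (not part of the mcf package): the OOB permutation idea
# implemented above, reduced to a generic fitted regressor. The names `model`,
# `x_val` and `y_val` are hypothetical stand-ins for the forest and its
# out-of-bag data.
def _permutation_importance_sketch(model, x_val, y_val, seed=None):
    """Return per-column MSE after shuffling, in % of the unpermuted MSE."""
    rng = np.random.default_rng(seed)
    base_mse = np.mean((y_val - model.predict(x_val)) ** 2)
    rel_mse = np.empty(x_val.shape[1])
    for col in range(x_val.shape[1]):
        x_perm = x_val.copy()
        rng.shuffle(x_perm[:, col])           # break the column-outcome link
        mse = np.mean((y_val - model.predict(x_perm)) ** 2)
        rel_mse[col] = mse / base_mse * 100   # same scaling as vim_print
    return rel_mse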
def number_of_groups_vi(no_x_names):
"""Determine no of groups for groupwise variable importance measure.
Parameters
----------
no_x_names :INT. No of variables considered in analysis.
Returns
-------
groups : INT.
merged_groups : INT.
"""
if no_x_names >= 100:
groups = 20
merged_groups = 19
elif 20 <= no_x_names < 100:
groups = 10
merged_groups = 9
elif 10 <= no_x_names < 20:
groups = 5
merged_groups = 4
elif 4 <= no_x_names < 10:
groups = 2
merged_groups = 0
else:
groups = 0
merged_groups = 0
return groups, merged_groups
def vim_grouping(vim, no_groups, accu=False):
"""Group variables according to their variable importance measure.
Parameters
----------
vim : Tuple (Numpy array list of INT). Relative vim and index.
no_g : INT. No of groups.
accu : Bool. Build groups by accumulation. Default = False.
Returns
-------
ind_groups : List of list of INT. Grouping of indices.
"""
indices = vim[1]
no_ind = len(indices)
if not accu:
group_size = no_ind / no_groups
group_size_int = math.floor(group_size)
one_more = 0
start_i = 0
ind_groups = []
for idx in range(no_groups):
if accu:
if idx < 2:
ind_groups = [indices[0], indices[0] + indices[idx]]
else:
new_group = ind_groups[idx-1] + indices[idx]
ind_groups.append(new_group)
else:
if idx == (no_groups - 1):
end_i = no_ind - 1
else:
end_i = start_i + group_size_int - 1
one_more += group_size - group_size_int
if one_more >= 1:
one_more -= 1
end_i += 1
ind_groups.append(indices[start_i:end_i+1])
start_i = end_i + 1
return ind_groups
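# Minimal sketch of the non-accumulating case (hypothetical inputs): ten
# sorted variable indices split into 4 groups. With group_size = 10/4 = 2.5
# the fractional remainder accumulates, so every second group receives one
# extra index. The helper below is illustrative only and not part of mcf.
def _example_vim_grouping():
    """Show how vim_grouping splits sorted indices into contiguous groups."""
    vim = (np.arange(10, 0, -1), np.arange(10))  # (sorted MSEs, sort index)
    ind_groups = vim_grouping(vim, 4)
    # -> [array([0, 1]), array([2, 3, 4]), array([5, 6]), array([7, 8, 9])]
    return ind_groups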
def vim_print(mse_ref, mse_values, x_name, ind_list=0, with_output=True,
single=True, partner_k=None):
"""Print Variable importance measure and create sorted output.
Parameters
----------
mse_ref : Numpy Float. Reference value of non-randomized x.
mse_values : Numpy array. MSE's for randomly permuted x.
x_name : List of strings. Variable names.
ind_list : List of INT, optional. Variable positions. Default is 0.
with_output : Boolean, optional. Default is True.
single : Boolean, optional. The default is True.
partner_k : List of None and Int or None. Index of variables that were
jointly randomized. Default is None.
Returns
-------
vim: Tuple of Numpy array and list of lists. MSE sorted and sort index.
"""
if partner_k is not None:
for idx, val in enumerate(partner_k):
if val is not None:
if (idx > (val-1)) and (idx > 0):
mse_values[idx-1] = mse_values[val-1]
mse = mse_values / np.array(mse_ref) * 100
var_indices = np.argsort(mse)
var_indices = np.flip(var_indices)
vim_sorted = mse[var_indices]
    if single:
        x_names_sorted = np.array(x_name, copy=True)
        x_names_sorted = x_names_sorted[var_indices]
    else:
        group_names = [[x_name[i] for i in group] for group in ind_list]
        x_names_sorted = [group_names[i] for i in var_indices]
    if with_output:
        print('\nVariable importance statistics in % of reference MSE')
        for name, value in zip(x_names_sorted, vim_sorted):
            print('{}: {:8.2f}'.format(name, value))
    return vim_sorted, var_indices
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module provides normalisation methods using landmark registration, first described
with application to cytometry data by Hahne et al [1] with further expansion by Finak et al [2].
Landmark registration is implemented in the LandmarkReg class using Scikit-FDA.
[1] <NAME>, <NAME>, <NAME>, <NAME>, Gascoyne RD,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Per-channel basis normalization methods for flow cytometry data.
Cytometry A. 2010 Feb;77(2):121-31. doi: 10.1002/cyto.a.20823. PMID: 19899135; PMCID: PMC3648208.
[2] <NAME>, <NAME>, <NAME>, et al. High-throughput flow cytometry data normalization
for clinical trials. Cytometry A. 2014;85(3):277-286. doi:10.1002/cyto.a.22433
Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from skfda.preprocessing.registration import landmark_registration_warping, landmark_shift_deltas
from skfda.representation.grid import FDataGrid
from sklearn.cluster import KMeans
from detecta import detect_peaks
from KDEpy import FFTKDE
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, cytopy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def peaks(y: np.ndarray,
x: np.ndarray,
**kwargs):
"""
Detect peaks of some function, y, in the grid space, x.
Parameters
----------
y: numpy.ndarray
x: numpy.ndarray
kwargs:
Additional keyword arguments passed to detecta.detect_peaks function
Returns
-------
List
"""
p = detect_peaks(y, **kwargs)
return [x[i] for i in p]
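# Hedged usage sketch (synthetic data; not from the original module): smooth
# a bimodal sample with FFTKDE, then detect the two density modes. 'mph' is
# detecta's minimum-peak-height argument.
def _example_peaks():
    x_sample = np.concatenate([np.random.normal(0, 1, 500),
                               np.random.normal(5, 1, 500)])
    x, y = FFTKDE(kernel="gaussian", bw="silverman").fit(x_sample).evaluate()
    return peaks(y, x, mph=0.01)  # two landmarks, near 0 and 5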
def filter_by_closest_centroid(x: np.ndarray,
labels: np.ndarray,
centroid: float):
"""
Filter peaks ('x') to keep only those
closest to their nearest centroid (centroid of clustered peaks).
Labels indicate where the peak originated from; either target
sample (0) or reference (1).
Parameters
----------
x: numpy.ndarray
labels: numpy.ndarray
centroid: float
Returns
-------
float, float
Peaks closest to centroid in cluster 1, Peaks closest to centroid in cluster 2
"""
x, labels = np.array(x), np.array(labels)
y1 = x[np.where(labels == 0)]
y2 = x[np.where(labels == 1)]
if len(y1) > 1:
y1 = y1[np.abs(y1 - centroid).argmin()]
else:
y1 = y1[0]
if len(y2) > 1:
y2 = y2[np.abs(y2 - centroid).argmin()]
else:
y2 = y2[0]
return y1, y2
def cluster_landmarks(p: np.ndarray,
plabels: np.ndarray):
"""
Cluster peaks (p). plabels indicate where the peak originated from; either target
sample (0) or reference (1). The number of clusters, determined by KMeans clustering
is equal to the number of peaks for the target sample.
Parameters
----------
p: numpy.ndarray
Peaks
plabels: numpy.ndarray
Peak labels
Returns
-------
numpy.ndarray, numpy.ndarray
K Means labels for each peak, cluster centroids
"""
km = KMeans(n_clusters=len(np.where(np.array(plabels) == 0)[0]),
random_state=42)
km_labels = km.fit_predict(np.array(p).reshape(-1, 1))
centroids = km.cluster_centers_.reshape(-1)
return km_labels, centroids
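# Hedged sketch (synthetic values): peaks pooled from a target sample
# (label 0) and a reference (label 1); the target contributes two peaks,
# so KMeans fits two clusters.
def _example_cluster_landmarks():
    p = np.array([0.1, 0.2, 4.9, 5.1])
    plabels = np.array([0, 1, 0, 1])
    km_labels, centroids = cluster_landmarks(p, plabels)
    return km_labels, centroids  # two clusters, centred near 0.15 and 5.0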
def zero_entropy_clusters(km_labels: np.ndarray,
plabels: np.ndarray,
centroids: np.ndarray):
"""
Determine which clusters (if any) have zero entropy (only contains
peaks from a single sample; either target or reference)
Parameters
----------
km_labels: numpy.ndarray
K means cluster labels
plabels: numpy.ndarray
Origin of the peak; either target (0) or reference (1)
centroids: numpy.ndarray
Cluster centroids
Returns
-------
List
List of centroids for clusters with zero entropy
"""
zero_entropy = list()
    for i in np.unique(km_labels):
        cluster_plabels = np.array(plabels)[km_labels == i]
        if len(np.unique(cluster_plabels)) == 1:
            zero_entropy.append(float(centroids[i]))
    return zero_entropy
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function to manipulate graphs for computing distances.
"""
import skimage.morphology
import numpy as np
import networkx as nx
import itertools
import logging
import graph_tool as gt
import graph_tool.topology
import graph_tool.generation
import src.utils as utils
# Compute shortest path from all nodes to or from all source nodes
def get_distance_node_list(gtG, source_nodes, direction, weights=None):
gtG_ = gt.Graph(gtG)
v = gtG_.add_vertex()
if weights is not None:
weights = gtG_.edge_properties[weights]
for s in source_nodes:
e = gtG_.add_edge(s, int(v))
if weights is not None:
weights[e] = 0.
if direction == 'to':
dist = gt.topology.shortest_distance(
gt.GraphView(gtG_, reversed=True), source=gtG_.vertex(int(v)),
target=None, weights=weights)
elif direction == 'from':
dist = gt.topology.shortest_distance(
gt.GraphView(gtG_, reversed=False), source=gtG_.vertex(int(v)),
target=None, weights=weights)
dist = np.array(dist.get_array())
dist = dist[:-1]
if weights is None:
dist = dist - 1
return dist
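# The trick above: a virtual vertex v is wired to every source with a
# zero-weight (or, if unweighted, unit) edge, so one shortest-distance call
# from v yields the distance to the nearest source for every node; the
# trailing entry for v is dropped, and the extra hop through v is
# subtracted in the unweighted case. A hedged sketch on a toy chain graph
# (illustration only):
def _example_get_distance_node_list():
  g = gt.Graph(directed=False)
  g.add_vertex(4)
  g.add_edge_list([(0, 1), (1, 2), (2, 3)])
  return get_distance_node_list(g, [0], 'from')  # array([0, 1, 2, 3])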
# Functions for semantically labelling nodes in the traversal graph.
def generate_lattice(sz_x, sz_y):
"""Generates a lattice with sz_x vertices along x and sz_y vertices along y
direction Each of these vertices is step_size distance apart. Origin is at
(0,0). """
g = gt.generation.lattice([sz_x, sz_y])
x, y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
x = np.reshape(x, [-1, 1]);
y = np.reshape(y, [-1, 1]);
nodes = np.concatenate((x, y), axis=1)
return g, nodes
def add_diagonal_edges(g, nodes, sz_x, sz_y, edge_len):
offset = [sz_x + 1, sz_x - 1]
for o in offset:
s = np.arange(nodes.shape[0] - o - 1)
t = s + o
ind = np.all(np.abs(nodes[s, :] - nodes[t, :]) == np.array([[1, 1]]), axis=1)
s = s[ind][:, np.newaxis]
t = t[ind][:, np.newaxis]
st = np.concatenate((s, t), axis=1)
for i in range(st.shape[0]):
e = g.add_edge(st[i, 0], st[i, 1], add_missing=False)
g.ep['wts'][e] = edge_len
def convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1.,
oo_cost=1., connectivity=4):
assert (connectivity == 4 or connectivity == 8)
sz_x = traversible.shape[1]
sz_y = traversible.shape[0]
g, nodes = generate_lattice(sz_x, sz_y)
# Assign costs.
edge_wts = g.new_edge_property('float')
g.edge_properties['wts'] = edge_wts
wts = np.ones(g.num_edges(), dtype=np.float32)
edge_wts.get_array()[:] = wts
if connectivity == 8:
add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.))
se = np.array([[int(e.source()), int(e.target())] for e in g.edges()])
s_xy = nodes[se[:, 0]]
t_xy = nodes[se[:, 1]]
s_t = np.ravel_multi_index((s_xy[:, 1], s_xy[:, 0]), traversible.shape)
t_t = np.ravel_multi_index((t_xy[:, 1], t_xy[:, 0]), traversible.shape)
s_t = traversible.ravel()[s_t]
t_t = traversible.ravel()[t_t]
wts = np.zeros(g.num_edges(), dtype=np.float32)
wts[np.logical_and(s_t == True, t_t == True)] = ff_cost
wts[np.logical_and(s_t == False, t_t == False)] = oo_cost
wts[np.logical_xor(s_t, t_t)] = fo_cost
edge_wts = g.edge_properties['wts']
for i, e in enumerate(g.edges()):
edge_wts[e] = edge_wts[e] * wts[i]
# d = edge_wts.get_array()*1.
# edge_wts.get_array()[:] = d*wts
return g, nodes
def label_nodes_with_class(nodes_xyt, class_maps, pix):
"""
Returns:
class_maps__: one-hot class_map for each class.
node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes
"""
# Assign each pixel to a node.
selem = skimage.morphology.disk(pix)
class_maps_ = class_maps * 1.
for i in range(class_maps.shape[2]):
class_maps_[:, :, i] = skimage.morphology.dilation(class_maps[:, :, i] * 1, selem)
class_maps__ = np.argmax(class_maps_, axis=2)
class_maps__[np.max(class_maps_, axis=2) == 0] = -1
# For each node pick out the label from this class map.
x = np.round(nodes_xyt[:, [0]]).astype(np.int32)
y = np.round(nodes_xyt[:, [1]]).astype(np.int32)
ind = np.ravel_multi_index((y, x), class_maps__.shape)
node_class_label = class_maps__.ravel()[ind][:, 0]
# Convert to one hot versions.
class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool)
node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool)
for i in range(class_maps.shape[2]):
class_maps_one_hot[:, :, i] = class_maps__ == i
node_class_label_one_hot[:, i] = node_class_label == i
return class_maps_one_hot, node_class_label_one_hot
def label_nodes_with_class_geodesic(nodes_xyt, class_maps, pix, traversible,
ff_cost=1., fo_cost=1., oo_cost=1.,
connectivity=4):
"""Labels nodes in nodes_xyt with class labels using geodesic distance as
defined by traversible from class_maps.
Inputs:
nodes_xyt
class_maps: counts for each class.
pix: distance threshold to consider close enough to target.
traversible: binary map of whether traversible or not.
Output:
labels: For each node in nodes_xyt returns a label of the class or -1 is
unlabelled.
"""
g, nodes = convert_traversible_to_graph(traversible, ff_cost=ff_cost,
fo_cost=fo_cost, oo_cost=oo_cost,
connectivity=connectivity)
class_dist = np.zeros_like(class_maps * 1.)
n_classes = class_maps.shape[2]
if False:
# Assign each pixel to a class based on number of points.
selem = skimage.morphology.disk(pix)
class_maps_ = class_maps * 1.
class_maps__ = np.argmax(class_maps_, axis=2)
class_maps__[np.max(class_maps_, axis=2) == 0] = -1
# Label nodes with classes.
for i in range(n_classes):
# class_node_ids = np.where(class_maps__.ravel() == i)[0]
class_node_ids = np.where(class_maps[:, :, i].ravel() > 0)[0]
dist_i = get_distance_node_list(g, class_node_ids, 'to', weights='wts')
class_dist[:, :, i] = np.reshape(dist_i, class_dist[:, :, i].shape)
class_map_geodesic = (class_dist <= pix)
class_map_geodesic = np.reshape(class_map_geodesic, [-1, n_classes])
# For each node pick out the label from this class map.
x = np.round(nodes_xyt[:, [0]]).astype(np.int32)
y = np.round(nodes_xyt[:, [1]]).astype(np.int32)
ind = np.ravel_multi_index((y, x), class_dist[:, :, 0].shape)
node_class_label = class_map_geodesic[ind[:, 0], :]
class_map_geodesic = class_dist <= pix
return class_map_geodesic, node_class_label
def _get_next_nodes_undirected(n, sc, n_ori):
nodes_to_add = []
nodes_to_validate = []
(p, q, r) = n
nodes_to_add.append((n, (p, q, r), 0))
if n_ori == 4:
for _ in [1, 2, 3, 4]:
if _ == 1:
v = (p - sc, q, r)
elif _ == 2:
v = (p + sc, q, r)
elif _ == 3:
v = (p, q - sc, r)
elif _ == 4:
v = (p, q + sc, r)
nodes_to_validate.append((n, v, _))
return nodes_to_add, nodes_to_validate
def _get_next_nodes(n, sc, n_ori):
nodes_to_add = []
nodes_to_validate = []
(p, q, r) = n
for r_, a_ in zip([-1, 0, 1], [1, 0, 2]):
nodes_to_add.append((n, (p, q, np.mod(r + r_, n_ori)), a_))
if n_ori == 6:
if r == 0:
v = (p + sc, q, r)
elif r == 1:
v = (p + sc, q + sc, r)
elif r == 2:
v = (p, q + sc, r)
elif r == 3:
v = (p - sc, q, r)
elif r == 4:
v = (p - sc, q - sc, r)
elif r == 5:
v = (p, q - sc, r)
elif n_ori == 4:
if r == 0:
v = (p + sc, q, r)
elif r == 1:
v = (p, q + sc, r)
elif r == 2:
v = (p - sc, q, r)
elif r == 3:
v = (p, q - sc, r)
nodes_to_validate.append((n, v, 3))
return nodes_to_add, nodes_to_validate
def generate_graph(valid_fn_vec=None, sc=1., n_ori=6,
starting_location=(0, 0, 0), vis=False, directed=True):
timer = utils.Timer()
timer.tic()
if directed:
G = nx.DiGraph(directed=True)
else:
G = nx.Graph()
G.add_node(starting_location)
new_nodes = G.nodes()
while len(new_nodes) != 0:
nodes_to_add = []
nodes_to_validate = []
for n in new_nodes:
if directed:
na, nv = _get_next_nodes(n, sc, n_ori)
else:
na, nv = _get_next_nodes_undirected(n, sc, n_ori)
nodes_to_add = nodes_to_add + na
if valid_fn_vec is not None:
nodes_to_validate = nodes_to_validate + nv
else:
                nodes_to_add = nodes_to_add + nv
# Validate nodes.
vs = [_[1] for _ in nodes_to_validate]
valids = valid_fn_vec(vs)
for nva, valid in zip(nodes_to_validate, valids):
if valid:
nodes_to_add.append(nva)
new_nodes = []
for n, v, a in nodes_to_add:
if not G.has_node(v):
new_nodes.append(v)
G.add_edge(n, v, action=a)
timer.toc(average=True, log_at=1, log_str='src.graph_utils.generate_graph')
return (G)
def vis_G(G, ax, vertex_color='r', edge_color='b', r=None):
if edge_color is not None:
for e in G.edges():
XYT = zip(*e)
x = XYT[-3]
y = XYT[-2]
t = XYT[-1]
if r is None or t[0] == r:
ax.plot(x, y, edge_color)
if vertex_color is not None:
XYT = zip(*G.nodes())
x = XYT[-3]
y = XYT[-2]
t = XYT[-1]
ax.plot(x, y, vertex_color + '.')
def convert_to_graph_tool(G):
timer = utils.Timer()
timer.tic()
gtG = gt.Graph(directed=G.is_directed())
gtG.ep['action'] = gtG.new_edge_property('int')
nodes_list = G.nodes()
nodes_array = np.array(nodes_list)
nodes_id = np.zeros((nodes_array.shape[0],), dtype=np.int64)
for i in range(nodes_array.shape[0]):
v = gtG.add_vertex()
nodes_id[i] = int(v)
# d = {key: value for (key, value) in zip(nodes_list, nodes_id)}
d = dict(itertools.izip(nodes_list, nodes_id))
for src, dst, data in G.edges_iter(data=True):
e = gtG.add_edge(d[src], d[dst])
gtG.ep['action'][e] = data['action']
nodes_to_id = d
timer.toc(average=True, log_at=1, log_str='src.graph_utils.convert_to_graph_tool')
return gtG, nodes_array, nodes_to_id
def _rejection_sampling(rng, sampling_d, target_d, bins, hardness, M):
bin_ind = np.digitize(hardness, bins) - 1
i = 0
ratio = target_d[bin_ind] / (M * sampling_d[bin_ind])
while i < ratio.size and rng.rand() > ratio[i]:
i = i + 1
return i
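# Hedged sketch of the sampler above (hypothetical densities): candidate i,
# falling in hardness bin b, is accepted with probability
# target_d[b] / (M * sampling_d[b]); the index of the first accepted
# candidate is returned.
def _example_rejection_sampling():
  rng = np.random.RandomState(0)
  bins = np.array([0., 0.5, 1.0])
  sampling_d = np.array([0.7, 0.3])  # density candidates were drawn from
  target_d = np.array([0.5, 0.5])    # density we want to match
  hardness = np.array([0.2, 0.8, 0.4, 0.6])
  return _rejection_sampling(rng, sampling_d, target_d, bins, hardness, 2.)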
def heuristic_fn_vec(n1, n2, n_ori, step_size):
# n1 is a vector and n2 is a single point.
dx = (n1[:, 0] - n2[0, 0]) / step_size
dy = (n1[:, 1] - n2[0, 1]) / step_size
dt = n1[:, 2] - n2[0, 2]
dt = np.mod(dt, n_ori)
dt = np.minimum(dt, n_ori - dt)
if n_ori == 6:
if dx * dy > 0:
d = np.maximum(np.abs(dx), np.abs(dy))
else:
d = np.abs(dy - dx)
elif n_ori == 4:
        d = np.abs(dx) + np.abs(dy)
    return (d + dt).reshape((-1, 1))
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 15:40:32 2021
@author: <NAME>
Script takes as input a dataset of the type "Spooled files_000x.tif", images
from the Andor camera, assumes every odd number for x is a shot with the tweezer off
(just MOT), while every even number is with tweezer on as well.
Subtracts the MOT-only image from the tweezer + MOT image and averages over multiple attempts
"""
#%% Imports
import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
#%% Loading Data
# Data location and define empty lists
data_location = "U:/KAT1/Images/dipole/MOToffDetuningSweep/"
file_name = "15ms_121mhz"
file_prefix = "/Spooled files_"
# File numbers; step is 2 because odd and even iterations alternate.
# stop_number is the number of picture files you took.
start_number = 4
stop_number = 40
step_number = 2
#%% Load even and odd files lists, corresponding to tweezer on or off
def load_image_list(start, stop, step, data_location, file_prefix):
iteration_list = []
# Load files
for i in range(start, stop, step):
im = Image.open(data_location +
file_name +
file_prefix +
str(i).zfill(4) +
".tif")
        array = np.array(im)
        iteration_list.append(array)
    return iteration_list
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
API
========================================================================
Connection point for all package utilities to be provided
'''
import collections
import numba
import numpy as np
import sys
import time
import types
from bisect import bisect_left
import matplotlib.pyplot as plt
from . import GLOBALS
from . import auxiliaries as aux
from .auxiliaries import to_ndarray, wait
from .errorfunctions import get_errorfunction, maxsumabs
from . import models
from . import reference as ref # Careful with this circular import
# This global dictionary G is for passing some telemtery and debug arguments
global G
G = GLOBALS.dictionary
_sqrtrange = (aux.sqrtrange_python, aux.sqrtrange_numba)
fitsets = {'Poly10': models.Poly10}
#%%═════════════════════════════════════════════════════════════════════
## ROOT FINDING
def interval(f, x1, y1, x2, y2, fit1):
'''Returns the last x where f(x)<0'''
is_debug = G['debug']
if is_debug: #─────────────────────────────────────────────────────┐
print(f'\t{x1=}\t{y1=}')
print(f'\t{x2=}\t{y2=}')
G['mid'], = G['ax_root'].plot(x1, y1,'.', color = 'blue')
#──────────────────────────────────────────────────────────────────┘
# sqrtx1 = x1**0.5
# sqrtx2 = x2**0.5
while x2 - x1 > 2:
# Arithmetic mean between linear estimate and half
linest = x1 - y1 / (y2 - y1) * (x2 - x1)
halfest = (x2 + x1) / 2
# sqrtest1 = sqrtx1 - y1 * (sqrtx2 - sqrtx1) / (y2 - y1)
# sqrtest1 = sqrtest1*sqrtest1
# sqrtest2 = int(x1 + (x2 - x1) / (y2 / y1 - 1)**2)
# x_mid = int((x2 + x1) / 2)
x_mid = int((linest + halfest) / 2)
if x_mid == x1: # To stop repetition in close cases
x_mid += 1
elif x_mid == x2:
x_mid -= 1
y_mid, fit2 = f(x_mid)
if is_debug: #─────────────────────────────────────────────────┐
print(f'\t{x_mid=}\t{y_mid=}')
G['mid'].set_xdata(x_mid)
G['mid'].set_ydata(y_mid)
#──────────────────────────────────────────────────────────────┘
if y_mid > 0:
if is_debug: #─────────────────────────────────────────────┐
wait('\tError over tolerance\n')
G['ax_root'].plot(x2, y2,'.', color = 'black')
#──────────────────────────────────────────────────────────┘
x2, y2 = x_mid, y_mid
# sqrtx2 = x_mid **0.5
if is_debug: #─────────────────────────────────────────────┐
G['xy2'].set_xdata(x2)
G['xy2'].set_ydata(y2)
#──────────────────────────────────────────────────────────┘
else:
if is_debug: #─────────────────────────────────────────────┐
wait('\tError under tolerance\n')
G['ax_root'].plot(x1, y1,'.', color = 'black')
#──────────────────────────────────────────────────────────┘
x1, y1, fit1 = x_mid, y_mid, fit2
# sqrtx1 = x_mid ** 0.5
if is_debug: #─────────────────────────────────────────────┐
G['xy1'].set_xdata(x1)
G['xy1'].set_ydata(y1)
#──────────────────────────────────────────────────────────┘
if x2 - x1 == 2: # Points have only one point in between
y_mid, fit2 = f(x1+1) # Testing that point
return (x1+1, fit2) if (y_mid <0) else (x1, fit1) # If under, give that fit
else:
return x1, fit1
#───────────────────────────────────────────────────────────────────────
def droot(f, y1, x2, limit):
'''Finds the upper limit to interval
'''
is_debug = G['debug']
x1 = 0
y2, fit2 = f(x2)
fit1 = None
if is_debug: #─────────────────────────────────────────────────────┐
G['xy1'], = G['ax_root'].plot(x1, y1,'g.')
G['xy2'], = G['ax_root'].plot(x2, y2,'b.')
#──────────────────────────────────────────────────────────────────┘
while y2 < 0:
if is_debug: #─────────────────────────────────────────────────┐
wait('Calculating new attempt in droot\n')
G['ax_root'].plot(x1, y1,'k.')
#──────────────────────────────────────────────────────────────┘
x1, y1, fit1 = x2, y2, fit2
x2 *= 2
x2 += 1
if is_debug: #─────────────────────────────────────────────────┐
print(f'{limit=}')
print(f'{x1=}\t{y1=}')
print(f'{x2=}\t{y2=}')
G['xy1'].set_xdata(x1)
G['xy1'].set_ydata(y1)
G['xy2'].set_xdata(x2)
#──────────────────────────────────────────────────────────────┘
if x2 >= limit:
if is_debug: #─────────────────────────────────────────────┐
G['ax_root'].plot([limit, limit], [y1,0],'b.')
#──────────────────────────────────────────────────────────┘
y2, fit2 = f(limit)
if y2<0:
if is_debug: #─────────────────────────────────────────┐
wait('End reached within tolerance\n')
#──────────────────────────────────────────────────────┘
return limit, fit2
else:
if is_debug: #─────────────────────────────────────────┐
wait('End reached outside tolerance\n')
#──────────────────────────────────────────────────────┘
x2 = limit
break
y2, fit2 = f(x2)
if is_debug: #─────────────────────────────────────────────────┐
G['ax_root'].plot(x1, y1,'k.')
print(f'{x1=}\t{y1=}')
print(f'{x2=}\t{y2=}')
G['ax_root'].plot(x2, y2,'k.')
G['xy2'].set_ydata(y2)
#──────────────────────────────────────────────────────────────┘
if is_debug: #─────────────────────────────────────────────────────┐
G['xy2'].set_color('red')
wait('Points for interval found\n')
#──────────────────────────────────────────────────────────────────┘
return interval(f, x1, y1, x2, y2, fit1)
#───────────────────────────────────────────────────────────────────────
# @numba.jit(nopython=True,cache=True)
def n_lines(x: np.ndarray, y: np.ndarray, x0: float, y0: np.ndarray, tol: float
) -> float:
'''Estimates number of lines required to fit within error tolerance'''
if (length := len(x)) > 1:
inds = _sqrtrange[0](length - 2) # indices so that x[-1] is not included
res = (y[-1] - y0) / (x[-1] - x0)*(x[inds] - x0).reshape([-1,1]) - (y[inds] - y0)
reference = maxsumabs(res, tol)
if reference < 0: reference = 0
return 0.5 * reference ** 0.5 + 1
else:
return 1.
#───────────────────────────────────────────────────────────────────────
def _get_f2zero(x, y, x0, y0, sqrtrange, f_fit, errorfunction):
def f2zero(i: int) -> tuple:
'''Function such that i is optimal when f2zero(i) = 0'''
inds = sqrtrange(i)
residuals, fit = f_fit(x[inds], y[inds], x0, y0)
return errorfunction(residuals), fit
return f2zero
#───────────────────────────────────────────────────────────────────────
def _get_f2zero_debug(x, y, x0, y0, sqrtrange, f_fit, errorfunction):
def f2zero_debug(i: int) -> tuple:
'''Function such that i is optimal when f2zero(i) = 0'''
inds = sqrtrange(i)
residuals, fit = f_fit(x[inds], y[inds], x0, y0)
if len(residuals) == 1:
print(f'\t\t{residuals=}')
print(f'\t\tstart = {G["start"]} end = {i + G["start"]} points = {i + 1}')
print(f'\t\tx0\t{x0}\n\t\tx[0]\t{x[inds][0]}\n\t\tx[-1]\t{x[inds][-1]}\n\t\txstart = {G["x"][G["start"]]}')
indices_all = np.arange(-1, i + 1) + G['start']
G['x_plot'] = G['x'][indices_all]
G['y_plot'] = G['fyc'](fit, G['x_plot'])
G['line_fit'].set_xdata(G['x_plot'])
G['line_fit'].set_ydata(G['y_plot'])
# print(f'{G["y_plot"].shape=}')
# print(f'{G["y"][indices_all].shape=}')
res_all = G['y_plot'] - G['y'][indices_all].flatten()
print(f'\t\t{residuals.shape=}\n\t\t{res_all.shape=}')
G['ax_res'].clear()
G['ax_res'].grid()
G['ax_res'].axhline(color = 'red', linestyle = '--')
G['ax_res'].set_ylabel('Residual relative to tolerance')
            G['ax_res'].plot(indices_all - G['start'], np.abs(res_all))
import dxfgrabber
import numpy as np
import sys, os.path, inspect
import re
import pcbnew
import bulge
import pcbpoint
from sets import Set
import pdb
# how close together to points need to be to each other to be considered
# connected?
thresh = 0.01
# when breaking and arc into segments, what's the max seg length we want.
arc_line_length = 5.0
# I mostly depend on pcbpoint to deal with scaling issues. For radius below,
# still need to scale.
# the internal coorinate space of pcbnew is 10E-6 mm. (a millionth of a mm)
# the coordinate 121550000 corresponds to 121.550000
SCALE = 1000000.0
class graphic_actions:
def __init__(self, print_unhandled=False):
self.print_unhandled = print_unhandled
def line_action(self, start, end):
if (self.print_unhandled):
print("line: {} {}".format(start, end))
def circle_action(self, center, radius):
if (self.print_unhandled):
print("circle center: {} radius: {}".format(center, radius))
def arc_action(self, center, radius, start_angle, end_angle):
if (self.print_unhandled):
print("arc center: {} radius {} angles: {} {}".format(center, radius, start_angle, end_angle))
def poly_action(self, points):
if (self.print_unhandled):
print("poly: {}".format(points))
# here is some helper stuff
# dxf arcs are different from pcbnew arcs
# dxf arcs have a center point, radius and start/stop angles
# pcbnew arcs have a center pointer, radius, and a start point,
# angle (counter clockwise)
def dxfarc2pcbarc(self, center, radius, start_angle, end_angle):
# need negative angles because pcbnew flips over x axis
start_angle, end_angle = (min(start_angle, end_angle), max(start_angle, end_angle))
return (center,
start_angle-end_angle,
# location of start of arc
center.polar(radius, start_angle))
class segment_actions(graphic_actions):
def __init__(self, board, layer, print_unhandled=False):
graphic_actions.__init__(self, print_unhandled)
self.board = board
self.layer = layer
def make_basic_seg(self):
seg = pcbnew.DRAWSEGMENT(self.board)
seg.SetLayer(self.layer)
seg.SetShape(pcbnew.S_SEGMENT)
self.board.Add(seg)
return seg
def line_action(self, start, end):
seg = self.make_basic_seg()
seg.SetShape(pcbnew.S_SEGMENT)
seg.SetStart(pcbpoint.pcbpoint(start).wxpoint())
seg.SetEnd(pcbpoint.pcbpoint(end).wxpoint())
def circle_action(self, center, radius):
seg = self.make_basic_seg()
seg.SetShape(pcbnew.S_CIRCLE)
seg.SetCenter(pcbpoint.pcbpoint(center).wxpoint())
# kicad has a goofy way of specifying circles. instead
# of a radius, you give a point on the circle. The radius
# can be computed from there.
seg.SetEnd((pcbpoint.pcbpoint(center)+
pcbpoint.pcbpoint(radius, 0)).wxpoint()
)
def arc_action(self, center, radius, start_angle, end_angle):
# dxf arcs are different from pcbnew arcs
# dxf arcs have a center point, radius and start/stop angles
# pcbnew arcs have a center pointer, radius, and a start point,
# angle (counter clockwise)
seg = self.make_basic_seg()
center, angle, arcstart = self.dxfarc2pcbarc(pcbpoint.pcbpoint(center),
radius,
start_angle,
end_angle)
seg.SetShape(pcbnew.S_ARC)
seg.SetCenter(pcbpoint.pcbpoint(center).wxpoint())
# negative angle since y goes the wrong way.
seg.SetAngle(angle*10)
seg.SetArcStart(pcbpoint.pcbpoint(arcstart).wxpoint())
def poly_action(self, points):
seg = self.make_basic_seg()
seg.SetShape(pcbnew.S_POLYGON)
sps = seg.GetPolyShape()
o = sps.NewOutline()
for pt in points:
ppt = pcbpoint.pcbpoint(pt).wxpoint()
sps.Append(ppt.x, ppt.y)
class zone_actions(graphic_actions):
def __init__(self, board, net, layer, print_unhandled=False):
graphic_actions.__init__(self, print_unhandled)
self.board = board
self.net = net
self.layer = layer
# poly is the only thing that really makes sense in
# the zone context
def poly_action(self, points):
pcbpt = pcbpoint.pcbpoint(points[0]).wxpoint()
zone_container = self.board.InsertArea(self.net.GetNet(), 0, self.layer,
pcbpt.x, pcbpt.y,
pcbnew.CPolyLine.DIAGONAL_EDGE)
shape_poly_set = zone_container.Outline()
shapeid = 0
for pt in points[1:]:
pcbpt = pcbpoint.pcbpoint(pt).wxpoint()
shape_poly_set.Append(pcbpt.x, pcbpt.y)
zone_container.Hatch()
class mounting_actions(graphic_actions):
def __init__(self, board, footprint_mapping,
flip=False,
clearance=None,
print_unhandled=False):
graphic_actions.__init__(self, print_unhandled)
self.footprint_mapping = footprint_mapping
self.board = board
self.flip = flip
self.clearance = clearance
def circle_action(self, center, radius):
d = str(radius*2)
if d not in self.footprint_mapping:
print("diameter {} not found in footprint mapping".format(d))
return
fp = self.footprint_mapping[d]
mod = pcbnew.InstantiateFootprint(fp[0], fp[1])
mod.SetPosition(pcbpoint.pcbpoint(center).wxpoint())
if (self.flip):
mod.Flip(pcbpoint.pcbpoint(center).wxpoint())
if (self.clearance != None):
for pad in mod.Pads():
pad.SetLocalClearance(self.clearance)
self.board.Add(mod)
# http://www.ariel.com.au/a/python-point-int-poly.html
# determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs.
def point_inside_polygon(x,y,poly):
n = len(poly)
inside =False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
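# Quick hedged check of the even-odd (ray casting) test above on a unit
# square (hypothetical coordinates, illustration only):
def _example_point_inside_polygon():
    square = [(0, 0), (1, 0), (1, 1), (0, 1)]
    assert point_inside_polygon(0.5, 0.5, square)      # inside
    assert not point_inside_polygon(1.5, 0.5, square)  # outside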
def longest_angle_for_polygon(poly):
prevpt = poly[-1]
length = None
retval = None
for pt in poly:
d = prevpt.distance(pt)
if (length and (length>d)):
prevpt = pt
continue
length = d
retval = prevpt.angle(pt)
prevpt = pt
return retval
def center_for_polygon(poly):
# based on this:
# https://en.wikipedia.org/wiki/Centroid#Centroid_of_a_polygon
prev = poly[-1]
x = 0.0
y = 0.0
area = 0.0
for cur in poly:
x = x + (prev.x+cur.x)*(prev.x*cur.y - cur.x*prev.y)
y = y + (prev.y+cur.y)*(prev.x*cur.y - cur.x*prev.y)
area = area + prev.x*cur.y - cur.x*prev.y
prev = cur
area = area/2.0
x = x/6.0/area
y = y/6.0/area
return pcbpoint.pcbpoint(x,y,noscale=True)
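# Worked check of the shoelace-based centroid above: for the CCW square
# (0,0),(2,0),(2,2),(0,2) the signed area is A = 4 and
#   C_x = (1/(6A)) * sum_i (x_i + x_{i+1})*(x_i*y_{i+1} - x_{i+1}*y_i)
# evaluates to C_x = 24/(6*4) = 1 (and C_y = 1 by symmetry), as expected.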
class orient_actions(graphic_actions):
def __init__(self, board, modnames, print_unhandled=False):
graphic_actions.__init__(self, print_unhandled)
self.board = board
self.modnames = Set(modnames)
# I only care about poly because I want directionality (which a cirle doesn't have)
# and I want to check for enclosing (which doesn't make sense for line, arc
def poly_action(self, points):
for mod in self.board.GetModules():
#modname = mod.GetFPID().GetLibItemName().c_str()
#if (modname != "LED_5730"):
# continue
modname = mod.GetReference()
if (modname not in self.modnames):
continue
pos = pcbpoint.pcbpoint(mod.GetPosition())
inside = point_inside_polygon(pos.x, pos.y, points)
if (not inside):
continue
angle = longest_angle_for_polygon(points)
if (angle>0):
angle = angle - 180.0
mod.SetOrientation(angle*10)
mod.SetPosition(center_for_polygon(points).wxpoint())
class myarc:
def __init__(self, center, radius, start_angle, end_angle):
self.center = center = pcbpoint.pcbpoint(center)
self.radius = radius
self.start_angle = start_angle
self.end_angle = end_angle
self.start_point = center.polar(radius, start_angle)
self.end_point = center.polar(radius, end_angle)
self.other = Set()
def reverse(self):
self.start_angle, self.end_angle = (self.end_angle, self.start_angle)
self.start_point, self.end_point = (self.end_point, self.start_point)
def __str__(self):
return "arc c{} r{} {},{} {},{}".format(self.center, self.radius, self.start_angle, self.end_angle, self.start_point, self.end_point)
class myline:
def __init__(self, start_point, end_point):
self.start_point = pcbpoint.pcbpoint(start_point)
self.end_point = pcbpoint.pcbpoint(end_point)
self.other = Set()
def reverse(self):
self.start_point, self.end_point = (self.end_point, self.start_point)
def __str__(self):
return "line {} {}".format(self.start_point, self.end_point)
def mydist(o1, o2):
return min(o1.start_point.distance(o2.start_point),
o1.start_point.distance(o2.end_point),
o1.end_point.distance(o2.start_point),
o1.end_point.distance(o2.end_point))
# it is possible for two polygons to meet at a point. This implementation
# will destroy those. worry about it later.
def remove_non_duals(e):
if (len(e.other) == 2):
return
# if I have three lines joining in one spot. this doesn't deal
# with the properly. I try to mitigate that by beginning with
# lines that connect at only one edge first.
others = e.other
e.other = Set()
for other in others:
other.other.remove(e)
remove_non_duals(other)
def merge_arcs_and_lines(elts):
# yes, this is a O(n^2) algorithm. scanline would be better
# this is quicker to implement
for e1 in elts:
for e2 in elts:
if (e1==e2):
continue
if mydist(e1, e2)<thresh:
e1.other.add(e2)
e2.other.add(e1)
# this needs some work. if I have a line that connects to a poly,
# I want to first lose that line and only then break triple connections.
for e in elts:
if (len(e.other) == 1):
remove_non_duals(e)
for e in elts:
remove_non_duals(e)
merged = []
for e in elts:
if (len(e.other) != 2):
continue
members = [e]
# after this pop, the e.other set will have one member
other = e.other.pop()
other.other.remove(e)
while (other):
if (other == e):
break
nextelt = other.other.pop()
nextelt.other.remove(other)
members.append(other)
other = nextelt
if (len(members) < 2):
raise ValueError("There should be at least two members in this merged poly")
prev = members[-1]
# here, I'm about to reorder the point order of the member lines/arcs. I
# want to end_point to match the next member.
if ((members[0].start_point.distance(prev.end_point) > thresh) and
(members[0].end_point.distance(prev.end_point) > thresh)):
prev.reverse()
for m in members:
if (m.start_point.distance(prev.end_point) > thresh):
m.reverse()
if (m.start_point.distance(prev.end_point) > thresh):
raise ValueError("expecting the start and end to match here {} {}".format(prev, m))
prev = m
merged.append(members)
return merged
def break_curve(center, radius, start_angle, end_angle):
retpts = []
center = pcbpoint.pcbpoint(center)
# in this file, I generally use degrees (kicad uses degrees),
# in this function, radians more convenient.
    start_radians, end_radians = (np.deg2rad(start_angle),
                                  np.deg2rad(end_angle))
# -*- coding: utf-8 -*-
"""LDFA-H: Latent Dynamic Factor Analysis of High-dimensional time-series
This module implements the fitting algorithm of LDFA-H and the accessory functions to
facilitate the associate analyses or inferences.
Todo
----
* Correct function ``fit_Phi``.
.. _[1] Bong et al. (2020). Latent Dynamic Factor Analysis of High-Dimensional Neural Recordings. Submitted to NeurIPS2020.
"""
import time, sys, traceback
import numpy as np
from scipy import linalg
import ldfa.optimize as core
def _generate_lambda_glasso(bin_num, lambda_glasso, offset, lambda_diag=None):
"""Generate sparsity penalty matrix Lambda for a submatrix in Pi."""
lambda_glasso_out = np.full((bin_num, bin_num), -1) + (1+lambda_glasso) * \
(np.abs(np.arange(bin_num) - np.arange(bin_num)[:,np.newaxis]) <= offset)
if lambda_diag:
lambda_glasso_out[np.arange(bin_num), np.arange(bin_num)] = lambda_diag
return lambda_glasso_out
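# Hedged illustration (hypothetical parameters): with bin_num=4, offset=1
# and lambda_glasso=0.5, entries within one bin of the diagonal get the
# penalty 0.5 while all other entries are -1:
def _example_generate_lambda_glasso():
    return _generate_lambda_glasso(4, 0.5, 1)
    # array([[ 0.5,  0.5, -1. , -1. ],
    #        [ 0.5,  0.5,  0.5, -1. ],
    #        [-1. ,  0.5,  0.5,  0.5],
    #        [-1. , -1. ,  0.5,  0.5]])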
def _switch_back(Sigma, Phi_S, Gamma_T, Phi_T, beta):
"""Make the initial Phi_S positive definite."""
w, v = np.linalg.eig(Sigma)
sqrtS = (v*np.sqrt(w)[...,None,:])@v.transpose(0,2,1)
alpha = np.min(np.linalg.eigvals(
sqrtS @ linalg.block_diag(*Gamma_T) @ sqrtS),-1)/2
return (Sigma - alpha[:,None,None]*linalg.block_diag(*Phi_T),
[P + (b*alpha)@b.T for P, b in zip(Phi_S, beta)])
def _make_PD(m, thres_eigen=1e-4):
"""Make a coariance PSD based on its eigen decomposition."""
s, v = np.linalg.eigh(m)
if np.min(s)<=thres_eigen*np.max(s):
delta = thres_eigen*np.max(s) - np.min(s)
s = s + delta
return ([email protected](s)@np.linalg.inv(v))
def _temporal_est(V_eps_T, ar_order):
"""Perform temporal estimate given V_T.
.. _[1] <NAME>. and <NAME>. (2008). Regularized estimation of large covariance matrices. Ann. Statist., 36(1):199–227.
"""
num_time = V_eps_T.shape[0]
resids = np.zeros(num_time)
Amatrix = np.zeros([num_time, num_time])
resids[0] = V_eps_T[0,0]
for i in np.arange(1, ar_order):
Amatrix[i,:i] = np.linalg.pinv(V_eps_T[:i,:i]) @ V_eps_T[:i,i]
resids[i] = V_eps_T[i,i] \
- V_eps_T[i,:i] @ np.linalg.pinv(V_eps_T[:i,:i]) @ V_eps_T[:i,i]
for i in np.arange(ar_order, num_time):
Amatrix[i,i-ar_order:i] = np.linalg.pinv(V_eps_T[i-ar_order:i,i-ar_order:i]) \
@ V_eps_T[i-ar_order:i,i]
resids[i] = V_eps_T[i,i] \
- V_eps_T[i,i-ar_order:i] \
@ np.linalg.pinv(V_eps_T[i-ar_order:i,i-ar_order:i]) \
@ V_eps_T[i-ar_order:i,i]
# invIA = np.linalg.pinv(np.eye(num_time) - Amatrix)
# Psi_T_hat = invIA @ np.diag(resids) @ invIA.T
# Gamma_T_hat = np.linalg.inv(Psi_T_hat)
Gamma_T_hat = (np.eye(num_time)-Amatrix).T @ np.diag(1/resids) \
@ (np.eye(num_time)-Amatrix)
Psi_T_hat = np.linalg.pinv(Gamma_T_hat)
return Gamma_T_hat, Psi_T_hat
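# A hedged reading of the estimator above: each time bin is regressed on up
# to ar_order preceding bins, giving the AR coefficient matrix A and
# innovation variances `resids`; the precision is then assembled via the
# modified Cholesky identity Gamma = (I - A)' diag(resids)^{-1} (I - A).
# Toy usage on a synthetic AR(1) correlation matrix (illustration only):
def _example_temporal_est():
    num_time, rho = 5, 0.6
    V = rho ** np.abs(np.subtract.outer(np.arange(num_time),
                                        np.arange(num_time)))
    Gamma_T, Psi_T = _temporal_est(V, ar_order=1)
    return Gamma_T  # banded (tridiagonal) precision matrix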
def fit(data, num_f, lambda_cross, offset_cross,
lambda_auto=None, offset_auto=None, lambda_aug=0,
ths_ldfa=1e-2, max_ldfa=1000, ths_glasso=1e-8, max_glasso=1000,
ths_lasso=1e-8, max_lasso=1000, params_init=dict(), make_PD=False,
verbose=False):
"""The main function to perform multi-factor LDFA-H estimation.
Parameters
----------
data: list of (N, p_k, T) ndarrays
Observed data from K areas. Data from each area k consists of p_k-variate
time-series over T time bins in N trials.
num_f: int
The number of factors.
lambda_cross, lambda_auto: float
The sparsity penalty parameter for the inverse cross-correlation and inverse
auto-correlation matrix, respectively. The default value for lambda_auto is 0.
offset_cross, offset_auto: int
The bandwidth parameter for the inverse cross-correlation matrix and inverse
auto-correlation matrix, respectively. The default value for offset_auto is the
given value of offset_cross.
ths_ldfa, ths_glasso, ths_lasso: float, optional
The threshold values for deciding the convergence of the main iteration, the
glasso iteration, and the lasso iteration, respectively.
max_ldfa, max_glasso, max_lasso: int, optional
The maximum number of iteration for the main iteration, the glasso iteration,
and the lasso iteration, respectively.
beta_init: list of (p_k, num_f) ndarrays, optional
Custom initial values for beta. If not given, beta is initialized by CCA.
make_PD: boolean, optional
Switch for manual positive definitization. If data does not generate a positive
definite estimate of the covariance matrix, ``make_PD = True`` helps with
maintaining the matrix positive definite throughout the fitting algorithm. The
default value is False for the sake of running time.
verbose: boolean, optional
Swith for vocal feedback throughout the fitting algorithm. The default value is
False.
Returns
-------
Pi: (K*T, K*T) ndarray
The estimated sparse inverse correlation matrix.
Rho: (K*T, K*T) ndarray
The estimated correlation matrix before sparsification. Note that Rho != Pi^{-1}.
params: dict
The dictionary of the estimated parameters. It provides with the estimation of
Omega: (num_f, K*T, K*T) ndarray;
Gamma_S: a list of (p_k, p_k) ndarrays for k = 1, ..., K;
Gamma_T: a list of (T, T) ndarrays for k = 1, ..., K;
beta: a list of (p_k, num_f) ndarrays for k = 1, ..., K; and
mu: a list of (p_k, T) ndarrays for k = 1, ..., K.
Examples
--------
Pi, Rho, params =\
fit(data, num_f, lambda_cross, offset_cross, lambda_auto, offset_auto)
.. _[1] Bong et al. (2020). Latent Dynamic Factor Analysis of High-Dimensional Neural Recordings. Submitted to NeurIPS2020.
"""
dims = [dat.shape[1] for dat in data]
num_time = data[0].shape[2]
num_trial = data[0].shape[0]
# get full_graph
if lambda_auto is None:
lambda_auto = lambda_cross
if offset_auto is None:
offset_auto = offset_cross
lambda_glasso_auto = _generate_lambda_glasso(num_time, lambda_auto,
offset_auto)
lambda_glasso_cross = _generate_lambda_glasso(num_time, lambda_cross,
offset_cross)
lambda_glasso = np.array(np.block(
[[lambda_glasso_auto if j==i else lambda_glasso_cross
for j, _ in enumerate(data)]
for i, _ in enumerate(data)]))
# set mu
mu= [np.mean(dat, 0) for dat in data]
# initialization
if all(key in params_init for key in ('Omega', 'mu', 'beta', 'Gamma_S', 'Gamma_T')):
Omega = params_init['Omega']; mu = params_init['mu']; beta = params_init['beta']
Gamma_S = params_init['Gamma_S']; Gamma_T = params_init['Gamma_T']
Sigma = np.linalg.inv(Omega)
sig = np.sqrt(np.diagonal(Sigma,0,1,2))
Rho = Sigma/sig[:,None,:]/sig[:,:,None]
Pi = np.linalg.inv(Rho)
else:
if 'beta' in params_init:
            beta = [b.copy() for b in params_init['beta']]
weight = [np.linalg.pinv(b) for b in beta]
elif len(data)==2:
# initialize beta by CCA
S_xt = np.tensordot(
np.concatenate([dat-m for dat, m in zip(data,mu)], 1),
np.concatenate([dat-m for dat, m in zip(data,mu)], 1),
axes=((0,2),(0,2)))/num_trial/num_time
S_1 = S_xt[:dims[0],:dims[0]]
S_12 = S_xt[:dims[0],dims[0]:]
S_2 = S_xt[dims[0]:,dims[0]:]
U_1 = linalg.inv(linalg.sqrtm(S_1))
U_2 = linalg.inv(linalg.sqrtm(S_2))
u, s, vh = np.linalg.svd(U_1 @ S_12 @ U_2)
weight = [u[:,:num_f].T @ U_1, vh[:num_f] @ U_2]
beta = [linalg.inv(U_1) @ u[:,:num_f], linalg.inv(U_2) @ vh[:num_f].T]
else:
print("Default initialization only supports 2 populations now.")
raise
weight = [w*np.sqrt(np.sum(b**2, 0))[:,None] for w, b in zip(weight,beta)]
beta = [b/np.sqrt(np.sum(b**2, 0)) for b in beta]
# initialization on other parameters
m_z_x = np.concatenate([np.matmul(w, dat-m)[...,None,:]
for m, w, dat in zip(mu, weight, data)], -2)
V_z_x = np.zeros((num_f,len(dims),num_time)*2)
m_zk_x = m_z_x.transpose((2,0,1,3))
V_zk_x = np.diagonal(V_z_x,0,1,4).transpose(4,0,1,2,3)
# mu = [np.mean(dat - b @ m, 0)
# for dat, m, b in zip(data, m_zk_x, beta)]
m_eps = [dat - b @ m1 - m2
for dat, m1, b, m2 in zip(data, m_zk_x, beta, mu)]
v_eps = [(np.sum(np.square(m))*(1+lambda_aug)/num_trial
+ np.trace(V.reshape(num_f*num_time,num_f*num_time)))
# /(d-1)
for m, V, d in zip(m_eps, V_zk_x, dims)]
V_eps_S = [np.tensordot(m,m,axes=((0,2),(0,2)))*(1+lambda_aug)/num_trial/v
+ [email protected](np.diagonal(V,0,1,3),-1)@b.T/v
for m, V, v, b, d in zip(m_eps, V_zk_x, v_eps, beta, dims)]
V_eps_T = [np.tensordot(m,np.linalg.pinv(V2)@m, axes=((0,1),(0,1)))
*(lambda_aug*np.eye(num_time)+1)/d/num_trial
+ np.tensordot(V1,[email protected](V2)@b,axes=([0,2],[0,1]))/d
for m, V1, V2, b, d in zip(m_eps, V_zk_x, V_eps_S, beta, dims)]
sd_eps_T = [np.sqrt(np.diag(V)) for V in V_eps_T]
R_eps_T = [V/sd/sd[:,None] for V, sd in zip(V_eps_T, sd_eps_T)]
Phi_T = [R*sd*sd[:,None] for sd, R in zip(sd_eps_T, R_eps_T)]
Gamma_T = [np.linalg.inv(P) for P in Phi_T]
V_zf = (np.diagonal(V_z_x,0,0,3).transpose((4,0,1,2,3))
+ (m_z_x.reshape((-1,num_f,len(dims)*num_time)).transpose((1,2,0))
@ m_z_x.reshape((-1,num_f,len(dims)*num_time)).transpose((1,0,2))
* (lambda_aug*np.eye(2*num_time)+1)/ num_trial)\
.reshape((num_f,len(dims),num_time,len(dims),num_time)))
Sigma, Phi_S = _switch_back(
V_zf.reshape(num_f,len(dims)*num_time,len(dims)*num_time),
V_eps_S, Gamma_T, Phi_T, beta)
if make_PD:
Phi_S = [_make_PD(P) for P in Phi_S]
for f in np.arange(num_f):
Sigma[f] = _make_PD(Sigma[f])
Gamma_S = [np.linalg.inv(P) for P in Phi_S]
sig = np.sqrt(np.diagonal(Sigma,0,1,2))
Rho = Sigma/sig[:,None,:]/sig[:,:,None]
        Pi = np.linalg.inv(Rho)
import numpy as np
from index_mesh import IndexMesh
from combine_cells import combine_cells
from util import find_local_numbering
class EMIMesh(IndexMesh):
def __init__(self, Nx, Ny, Nz, N):
super().__init__(Nx, Ny, Nz, N)
def set_up_mesh(G):
"Set up vectors containing the indices of the different types of nodes"
# Read geometry
Nx = G.Nx
Ny = G.Ny
Nz = G.Nz
N = G.N
mesh = EMIMesh(Nx, Ny, Nz, N)
# x-direction ranges from west (w) to east (e)
# y-direction ranges from south (s) to north (n)
# z-direction ranges from low (l) to high (h)
# Set up indices for outer boundary
# Set up indices for outer boundary (corners)
mesh.e_lsw = np.array([mesh[ 0, 0, 0]])
mesh.e_lse = np.array([mesh[Nx-1, 0, 0]])
mesh.e_lnw = np.array([mesh[ 0, Ny-1, 0]])
mesh.e_lne = np.array([mesh[Nx-1, Ny-1, 0]])
mesh.e_hsw = np.array([mesh[ 0, 0, Nz-1]])
mesh.e_hse = np.array([mesh[Nx-1, 0, Nz-1]])
mesh.e_hnw = np.array([mesh[ 0, Ny-1, Nz-1]])
mesh.e_hne = np.array([mesh[Nx-1, Ny-1, Nz-1]])
# Set up indices for outer boundary (lines)
# x direction
mesh.e_ls = mesh[ 1:Nx-1, 0, 0]
mesh.e_ln = mesh[ 1:Nx-1, Ny-1, 0]
mesh.e_hs = mesh[ 1:Nx-1, 0, Nz-1]
mesh.e_hn = mesh[ 1:Nx-1, Ny-1, Nz-1]
# y direction
mesh.e_lw = mesh[ 0, 1:Ny-1, 0]
mesh.e_le = mesh[ Nx-1, 1:Ny-1, 0]
mesh.e_hw = mesh[ 0, 1:Ny-1, Nz-1]
mesh.e_he = mesh[ Nx-1, 1:Ny-1, Nz-1]
# z direction
mesh.e_sw = mesh[ 0, 0, 1:Nz-1]
mesh.e_se = mesh[ Nx-1, 0, 1:Nz-1]
mesh.e_nw = mesh[ 0, Ny-1, 1:Nz-1]
mesh.e_ne = mesh[ Nx-1, Ny-1, 1:Nz-1]
# Set up indices for outer boundary (sides)
mesh.e_l = mesh[1:-1, 1:-1, 0]
mesh.e_h = mesh[1:-1, 1:-1, Nz-1]
mesh.e_s = mesh[1:-1, 0, 1:-1]
mesh.e_n = mesh[1:-1, Ny-1, 1:-1]
mesh.e_w = mesh[0, 1:-1, 1:-1]
mesh.e_e = mesh[Nx-1, 1:-1, 1:-1]
# Set up indices for the cell
mesh, cells = combine_cells(G, mesh)
print("Combined cells")
# Set up indices for the remaining extracellular cells
vec = np.ones(N)
index_sets_to_exclude_from_e = (
mesh.e_lsw, mesh.e_lse, mesh.e_lnw, mesh.e_lne,
mesh.e_hsw, mesh.e_hse, mesh.e_hnw, mesh.e_hne,
mesh.e_hw, mesh.e_he, mesh.e_hs, mesh.e_hn,
mesh.e_lw, mesh.e_le, mesh.e_ls, mesh.e_ln,
mesh.e_ne, mesh.e_sw, mesh.e_se, mesh.e_nw,
mesh.e_w, mesh.e_e, mesh.e_s, mesh.e_n, mesh.e_h, mesh.e_l,
mesh.m_lsw, mesh.m_lse, mesh.m_lnw, mesh.m_lne,
mesh.m_hsw, mesh.m_hse, mesh.m_hnw, mesh.m_hne,
mesh.m_hw, mesh.m_he, mesh.m_hs, mesh.m_hn,
mesh.m_lw, mesh.m_le, mesh.m_ls, mesh.m_ln,
mesh.m_ne, mesh.m_sw, mesh.m_se, mesh.m_nw,
mesh.m_w, mesh.m_e, mesh.m_s, mesh.m_n, mesh.m_h, mesh.m_l,
mesh.i_all
)
for index_set in index_sets_to_exclude_from_e:
vec[index_set] = 0
mesh.e = np.flatnonzero(vec)
# Set up indices for the membrane potential
mesh.v = np.concatenate([
mesh.m_lsw,
mesh.m_lse,
mesh.m_lnw,
mesh.m_lne,
mesh.m_hsw,
mesh.m_hse,
mesh.m_hnw,
mesh.m_hne,
mesh.m_hw,
mesh.m_he,
mesh.m_hs,
mesh.m_hn,
mesh.m_lw,
mesh.m_le,
mesh.m_ls,
mesh.m_ln,
mesh.m_ne,
mesh.m_sw,
mesh.m_se,
mesh.m_nw,
mesh.m_w,
mesh.m_e,
mesh.m_s,
mesh.m_n,
mesh.m_h,
mesh.m_l
])
mesh.v.sort()
# Set up indices for the gap junction
mesh.w = np.unique(np.concatenate([mesh.gx_e, mesh.gy_n]))
mesh.w.sort()
# Set up indices for the nodes not in the intracellular domain
mesh.not_i = np.concatenate([
mesh.e_lsw,
mesh.e_lse,
mesh.e_lnw,
mesh.e_lne,
mesh.e_hsw,
mesh.e_hse,
mesh.e_hnw,
mesh.e_hne,
mesh.e_hw,
mesh.e_he,
mesh.e_hs,
mesh.e_hn,
mesh.e_lw,
mesh.e_le,
mesh.e_ls,
mesh.e_ln,
mesh.e_ne,
mesh.e_sw,
mesh.e_se,
mesh.e_nw,
mesh.e_w,
mesh.e_e,
mesh.e_s,
mesh.e_n,
mesh.e_h,
mesh.e_l,
mesh.e,
mesh.gx_he,
mesh.gx_le,
mesh.gx_se,
mesh.gx_ne,
mesh.gx_lse,
mesh.gx_hse,
mesh.gx_lne,
mesh.gx_hne,
mesh.gy_ne,
mesh.gy_nw,
mesh.gy_ln,
mesh.gy_hn,
mesh.gy_lnw,
mesh.gy_hnw,
mesh.gy_lne,
mesh.gy_hne
])
mesh.not_i.sort()
# Set up indices for the extracellular domain
mesh.e_all = np.concatenate([
mesh.e_lsw, mesh.e_lse, mesh.e_lnw, mesh.e_lne,
mesh.e_hsw, mesh.e_hse, mesh.e_hnw, mesh.e_hne,
mesh.e_hw, mesh.e_he, mesh.e_hs, mesh.e_hn,
mesh.e_lw, mesh.e_le, mesh.e_ls, mesh.e_ln,
mesh.e_ne, mesh.e_sw, mesh.e_se, mesh.e_nw,
mesh.e_w, mesh.e_e, mesh.e_s, mesh.e_n, mesh.e_h, mesh.e_l,
mesh.m_lsw, mesh.m_lse, mesh.m_lnw, mesh.m_lne,
mesh.m_hsw, mesh.m_hse, mesh.m_hnw, mesh.m_hne,
mesh.m_hw, mesh.m_he, mesh.m_hs, mesh.m_hn,
mesh.m_lw, mesh.m_le, mesh.m_ls, mesh.m_ln,
mesh.m_ne, mesh.m_sw, mesh.m_se, mesh.m_nw,
mesh.m_w, mesh.m_e, mesh.m_s, mesh.m_n, mesh.m_h, mesh.m_l,
mesh.gx_he, mesh.gx_le, mesh.gx_se, mesh.gx_ne,
mesh.gy_hn, mesh.gy_ln, mesh.gy_nw, mesh.gy_ne,
mesh.gx_lse, mesh.gx_lne, mesh.gx_hse, mesh.gx_hne,
mesh.gy_lnw, mesh.gy_lne, mesh.gy_hnw, mesh.gy_hne,
mesh.e
])
mesh.e_all.sort()
mesh.i_all = np.unique(np.concatenate([
mesh.v, mesh.w, mesh.i, mesh.i_lsw, mesh.i_lse, mesh.i_lnw, mesh.i_lne, mesh.i_hsw, mesh.i_hse,
mesh.i_hnw, mesh.i_hne, mesh.i_hw, mesh.i_he, mesh.i_hs, mesh.i_hn,
mesh.i_lw, mesh.i_le, mesh.i_ls, mesh.i_ln, mesh.i_ne, mesh.i_sw,
mesh.i_se, mesh.i_nw, mesh.i_w, mesh.i_e, mesh.i_s, mesh.i_n, mesh.i_h,
mesh.i_l, mesh.gx_e, mesh.gy_n, mesh.gxi_w, mesh.gxi_hw, mesh.gxi_lw,
mesh.gxi_sw, mesh.gxi_nw, mesh.gyi_hs, mesh.gyi_ls, mesh.gyi_sw,
mesh.gyi_se, mesh.gyi_s, mesh.gxi_lsw, mesh.gxi_lnw, mesh.gxi_hsw,
mesh.gxi_hnw, mesh.gyi_lsw, mesh.gyi_lse, mesh.gyi_hsw, mesh.gyi_hse
]))
mesh.i_all.sort()
mesh.i_inner_all = np.unique(np.concatenate([
mesh.i,
mesh.i_lsw, mesh.i_lse, mesh.i_lnw, mesh.i_lne,
mesh.i_hsw, mesh.i_hse, mesh.i_hnw, mesh.i_hne,
mesh.i_hw, mesh.i_he, mesh.i_hs, mesh.i_hn,
mesh.i_lw, mesh.i_le, mesh.i_ls, mesh.i_ln,
mesh.i_ne, mesh.i_sw, mesh.i_se, mesh.i_nw,
mesh.i_w, mesh.i_e, mesh.i_s, mesh.i_n, mesh.i_l, mesh.i_h
]))
mesh.i_inner_all.sort()
# local numbering in mesh.v
# cells[n].v should not overlap with cells[k].v, where k != n
cells_v_idx = find_local_numbering(N, mesh.v, [cell.v for cell in cells])
for i, cell in enumerate(cells):
cell.v_idx = cells_v_idx[i]
del cells_v_idx
# local numbering in e_all
cells_v_of_e = find_local_numbering(N, mesh.e_all, [cell.v for cell in cells])
for i, cell in enumerate(cells):
cell.v_of_e = cells_v_of_e[i]
del cells_v_of_e
# local numbering in i_all
cells_v_of_i = find_local_numbering(N, mesh.i_all, [cell.v for cell in cells])
cells_gxw_of_i = find_local_numbering(N, mesh.i_all, [cell.gx_w for cell in cells])
cells_gxe_of_i = find_local_numbering(N, mesh.i_all, [cell.gx_e for cell in cells])
cells_gys_of_i = find_local_numbering(N, mesh.i_all, [cell.gy_s for cell in cells])
cells_gyn_of_i = find_local_numbering(N, mesh.i_all, [cell.gy_n for cell in cells])
for i, cell in enumerate(cells):
cell.v_of_i = cells_v_of_i[i]
cell.gxw_of_i = cells_gxw_of_i[i]
cell.gxe_of_i = cells_gxe_of_i[i]
cell.gys_of_i = cells_gys_of_i[i]
cell.gyn_of_i = cells_gyn_of_i[i]
del cells_v_of_i
del cells_gxw_of_i
del cells_gxe_of_i
del cells_gys_of_i
del cells_gyn_of_i
# cells[n].c_all can overlap with cells[k].c_all, where k != n
# solution: treat cell grid as checkered board so that direct neighbours are split in different sets.
# Swipe over the white first, then the black.
#cells[n].c_of_i
cell_indices_white = []
cell_indices_black = []
for j in range(G.num_cells_y):
for i in range(G.num_cells_x):
cell_index = j*G.num_cells_x + i
if (i + j) & 1 == 1:
cell_indices_black.append(cell_index)
else:
cell_indices_white.append(cell_index)
cells_c_of_i_white = find_local_numbering(N, mesh.i_all, [cells[i].c_all for i in cell_indices_white])
cells_c_of_i_black = find_local_numbering(N, mesh.i_all, [cells[i].c_all for i in cell_indices_black])
for i, cell_index in enumerate(cell_indices_white):
cells[cell_index].c_of_i = cells_c_of_i_white[i]
for i, cell_index in enumerate(cell_indices_black):
cells[cell_index].c_of_i = cells_c_of_i_black[i]
del cell_indices_white
del cell_indices_black
del cells_c_of_i_white
del cells_c_of_i_black
# local numbering in w
cells_gxw_of_w = find_local_numbering(N, mesh.w, [cell.gx_w for cell in cells])
cells_gxe_of_w = find_local_numbering(N, mesh.w, [cell.gx_e for cell in cells])
cells_gys_of_w = find_local_numbering(N, mesh.w, [cell.gy_s for cell in cells])
cells_gyn_of_w = find_local_numbering(N, mesh.w, [cell.gy_n for cell in cells])
for i, cell in enumerate(cells):
cell.gxw_of_w = cells_gxw_of_w[i]
cell.gxe_of_w = cells_gxe_of_w[i]
cell.gys_of_w = cells_gys_of_w[i]
cell.gyn_of_w = cells_gyn_of_w[i]
del cells_gxw_of_w
del cells_gxe_of_w
del cells_gys_of_w
del cells_gyn_of_w
# Inner membrane points
mesh.v_inner = np.concatenate([mesh.m_l, mesh.m_h, mesh.m_s, mesh.m_n, mesh.m_e, mesh.m_w])
mesh.v_inner.sort()
vec = np.zeros(N, dtype=np.int64)
vec[mesh.v_inner] = 1
vec = vec[mesh.v]
mesh.v_inner_of_v = np.flatnonzero(vec)
# Set up way to extract membrane points from the intracellular and extracellular domains
vec = np.zeros(N, dtype=np.int64)
vec[mesh.v] = 1
vec_of_i = vec[mesh.i_all]
vec_of_e = vec[mesh.e_all]
mesh.v_of_i = np.flatnonzero(vec_of_i)
mesh.v_of_e = np.flatnonzero(vec_of_e)
# v inner
vec = np.zeros(N, dtype=np.int64)
vec[mesh.v_inner] = 1
vec_of_i = vec[mesh.i_all]
vec_of_e = vec[mesh.e_all]
mesh.v_inner_of_i = np.flatnonzero(vec_of_i)
mesh.v_inner_of_e = np.flatnonzero(vec_of_e)
# Set up way to extract intercalated disc points from the intracellular domain
vec = np.zeros(N, dtype=np.int64)
vec[mesh.w] = 1
vec_of_i = vec[mesh.i_all]
    mesh.w_of_i = np.flatnonzero(vec_of_i)
    return mesh, cells
# Copyright 2018, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines a class for continuous particle injection with a moving window.
"""
import warnings
import numpy as np
from scipy.constants import c
import sys, inspect
class ContinuousInjector( object ):
"""
Class that stores a number of attributes that are needed for
continuous injection by a moving window.
"""
def __init__(self, Npz, zmin, zmax, dz_particles, Npr, rmin, rmax,
Nptheta, n, dens_func, ux_m, uy_m, uz_m, ux_th, uy_th, uz_th ):
"""
Initialize continuous injection
Parameters
----------
See the docstring of the `Particles` object
"""
# Register properties of the injected plasma
self.Npr = Npr
self.rmin = rmin
self.rmax = rmax
self.Nptheta = Nptheta
self.n = n
self.dens_func = dens_func
self.ux_m = ux_m
self.uy_m = uy_m
self.uz_m = uz_m
self.ux_th = ux_th
self.uy_th = uy_th
self.uz_th = uz_th
# Register spacing between evenly-spaced particles in z
if Npz != 0:
self.dz_particles = (zmax - zmin)/Npz
else:
# Fall back to the user-provided `dz_particles`.
# Note: this is an optional argument of `Particles` and so
# it is not always available.
self.dz_particles = dz_particles
# Register variables that define the positions
# where the plasma is injected.
self.v_end_plasma = c * uz_m / np.sqrt(1 + ux_m**2 + uy_m**2 + uz_m**2)
# These variables are set by `initialize_injection_positions`
self.nz_inject = None
self.z_inject = None
self.z_end_plasma = None
def initialize_injection_positions( self, comm, v_moving_window,
species_z, dt ):
"""
Initialize the positions that keep track of the injection of particles.
This is automatically called at the beginning of `step`.
Parameters
----------
comm: a BoundaryCommunicator object
Contains information about grid MPI decomposition
v_moving_window: float (in m/s)
The speed of the moving window
species_z: 1darray of float (in m)
(One element per macroparticle)
Used in order to infer the position of the end of the plasma
dt: float (in s)
Timestep of the simulation
"""
# The injection position is only initialized for the last proc
if comm.rank != comm.size-1:
return
# Initialize the injection position only if it has not be initialized
if self.z_inject is not None:
return
# Initialize plasma *ahead* of the right *physical*
# boundary of the box in the damping region (including the
# injection area) so that after `exchange_period` iterations
# (without adding new plasma), there will still be plasma
# inside the physical domain and the damping region (without the
# injection area). This ensures that there are never particles in the
# rightmost guard region and that there are always particles inside
# the damped region, where the field can be non-zero. New particles,
# which are injected in the Injection region, do not see any fields.
_, zmax_global_domain_with_damp = comm.get_zmin_zmax( local=False,
with_damp=True, with_guard=False )
self.z_inject = zmax_global_domain_with_damp \
+ (3-comm.n_inject)*comm.dz \
+ comm.exchange_period*dt*(v_moving_window-self.v_end_plasma)
self.nz_inject = 0
# Try to detect the position of the end of the plasma:
# Find the maximal position of the continously-injected particles
if len( species_z ) > 0:
# Add half of the spacing between particles (the
# injection function itself will add a half-spacing again)
self.z_end_plasma = species_z.max() + 0.5*self.dz_particles
else:
# Default value for empty species
_, zmax_global_physical_domain = comm.get_zmin_zmax( local=False,
with_damp=False, with_guard=False )
self.z_end_plasma = zmax_global_physical_domain
# Check that the particle spacing has been properly calculated
if self.dz_particles is None:
raise ValueError(
'The simulation uses continuous injection of particles, \n'
'but was unable to calculate the spacing between particles.\n'
'This may be because you used the `Particles` API directly.\n'
'In this case, please pass the argument `dz_particles` \n'
                'when initializing the `Particles` object.')
def reset_injection_positions( self ):
"""
Reset the variables that keep track of continuous injection to `None`
This is typically called when restarting a simulation from a checkpoint
"""
self.nz_inject = None
self.z_inject = None
self.z_end_plasma = None
def increment_injection_positions( self, v_moving_window, duration ):
"""
Update the positions between which the new particles will be generated,
the next time when `generate_particles` is called.
This function is automatically called when the moving window moves.
Parameters
----------
v_moving_window: float (in m/s)
The speed of the moving window
duration: float (in seconds)
The duration since the last time that the moving window moved.
"""
# Move the injection position
self.z_inject += v_moving_window * duration
# Take into account the motion of the end of the plasma
self.z_end_plasma += self.v_end_plasma * duration
        # Increment the number of particles to add along z
nz_new = int( (self.z_inject - self.z_end_plasma)/self.dz_particles )
self.nz_inject += nz_new
# Increment the virtual position of the end of the plasma
# (When `generate_particles` is called, then the plasma
# is injected between z_end_plasma - nz_inject*dz_particles
# and z_end_plasma, and afterwards nz_inject is set to 0.)
self.z_end_plasma += nz_new * self.dz_particles
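        # Worked example (hypothetical numbers): with dz_particles = 1 um, if
        # z_inject pulled ahead of z_end_plasma by 3.5 um since the last call,
        # then nz_new = 3, nz_inject grows by 3, and z_end_plasma virtually
        # advances by 3 um, leaving the 0.5 um remainder for the next call.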
def generate_particles( self, time ):
"""
Generate new particles at the right end of the plasma
(i.e. between z_end_plasma - nz_inject*dz and z_end_plasma)
Parameters
----------
        time: float (in seconds)
The current physical time of the simulation
"""
# Create a temporary density function that takes into
# account the fact that the plasma has moved
if self.dens_func is not None:
args = _check_dens_func_arguments( self.dens_func )
if args == ['z', 'r']:
def dens_func(z, r):
return self.dens_func( z - self.v_end_plasma*time, r )
elif args == ['x', 'y', 'z']:
def dens_func(x, y, z):
return self.dens_func( x, y, z - self.v_end_plasma*time )
else:
dens_func = None
# Create new particle cells
# Determine the positions between which new particles will be created
Npz = self.nz_inject
zmax = self.z_end_plasma
zmin = self.z_end_plasma - self.nz_inject*self.dz_particles
# Create the particles
Ntot, x, y, z, ux, uy, uz, inv_gamma, w = generate_evenly_spaced(
Npz, zmin, zmax, self.Npr, self.rmin, self.rmax,
self.Nptheta, self.n, dens_func,
self.ux_m, self.uy_m, self.uz_m,
self.ux_th, self.uy_th, self.uz_th )
# Reset the number of particle cells to be created
self.nz_inject = 0
return( Ntot, x, y, z, ux, uy, uz, inv_gamma, w )
# Utility functions
# -----------------
def generate_evenly_spaced( Npz, zmin, zmax, Npr, rmin, rmax,
Nptheta, n, dens_func, ux_m, uy_m, uz_m, ux_th, uy_th, uz_th ):
"""
Generate evenly-spaced particles, according to the density function
`dens_func`, and with the momenta given by the `ux/y/z` arguments.
Parameters
----------
See the docstring of the `Particles` object
"""
# Generate the particles and eliminate the ones that have zero weight ;
# infer the number of particles Ntot
if Npz*Npr*Nptheta > 0:
# Get the 1d arrays of evenly-spaced positions for the particles
dz = (zmax-zmin)*1./Npz
z_reg = zmin + dz*( np.arange(Npz) + 0.5 )
dr = (rmax-rmin)*1./Npr
r_reg = rmin + dr*( np.arange(Npr) + 0.5 )
dtheta = 2*np.pi/Nptheta
theta_reg = dtheta * np.arange(Nptheta)
# Get the corresponding particles positions
        # (copy=True is important here, since it makes it possible
        # to change the angles individually)
zp, rp, thetap = np.meshgrid( z_reg, r_reg, theta_reg,
copy=True, indexing='ij' )
# Prevent the particles from being aligned along any direction
unalign_angles( thetap, Npz, Npr, method='random' )
# Flatten them (This performs a memory copy)
r = rp.flatten()
x = r * np.cos( thetap.flatten() )
y = r * np.sin( thetap.flatten() )
z = zp.flatten()
# Get the weights (i.e. charge of each macroparticle), which
# are equal to the density times the volume r d\theta dr dz
w = n * r * dtheta*dr*dz
# Modulate it by the density profile
if dens_func is not None :
args = _check_dens_func_arguments( dens_func )
if args == ['x', 'y', 'z']:
w *= dens_func( x=x, y=y, z=z )
elif args == ['z', 'r']:
w *= dens_func( z=z, r=r )
# Select the particles that have a non-zero weight
selected = (w > 0)
if np.any(w < 0):
warnings.warn(
                'The specified `dens_func` returned negative densities.\n'
'No particles were generated in areas of negative density.\n'
'Please check the validity of the `dens_func`.')
# Infer the number of particles and select them
Ntot = int(selected.sum())
x = x[ selected ]
y = y[ selected ]
z = z[ selected ]
w = w[ selected ]
# Initialize the corresponding momenta
uz = uz_m * np.ones(Ntot) + uz_th * np.random.normal(size=Ntot)
ux = ux_m * np.ones(Ntot) + ux_th * np.random.normal(size=Ntot)
uy = uy_m * np.ones(Ntot) + uy_th * np.random.normal(size=Ntot)
inv_gamma = 1./np.sqrt( 1 + ux**2 + uy**2 + uz**2 )
# Return the particle arrays
return( Ntot, x, y, z, ux, uy, uz, inv_gamma, w )
else:
        # No particles are initialized; the arrays are still created
Ntot = 0
return( Ntot, np.empty(0), np.empty(0), np.empty(0), np.empty(0),
                np.empty(0), np.empty(0), np.empty(0), np.empty(0) )
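# --- Illustration (not part of the module above) ---
# A minimal, self-contained sketch of the cell-centered grid construction
# used by `generate_evenly_spaced`: evenly-spaced positions in z and r,
# regular angles in theta, and weights proportional to r*dtheta*dr*dz.
# All numbers are made up for illustration.
import numpy as np

Npz, Npr, Nptheta = 4, 3, 8
zmin, zmax, rmin, rmax, n = 0.0, 4.e-6, 0.0, 3.e-6, 1.e24

dz = (zmax - zmin) / Npz
z_reg = zmin + dz * (np.arange(Npz) + 0.5)   # cell-centered in z
dr = (rmax - rmin) / Npr
r_reg = rmin + dr * (np.arange(Npr) + 0.5)   # cell-centered in r
dtheta = 2 * np.pi / Nptheta
theta_reg = dtheta * np.arange(Nptheta)

zp, rp, thetap = np.meshgrid(z_reg, r_reg, theta_reg, copy=True, indexing='ij')
r = rp.flatten()
x = r * np.cos(thetap.flatten())
y = r * np.sin(thetap.flatten())
w = n * r * dtheta * dr * dz                 # macroparticle weights

assert x.shape == (Npz * Npr * Nptheta,)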
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 10:28:23 2019
@author: thele
"""
from . import measurement_functions
from . import condition_functions
import numpy as np
class Investigation_stage():
def __init__(self,jump,measure,check,configs,timer,pygor=None):
self.jump = jump
self.measure = measure
self.check = check
self.configure_investigation_sequence(configs)
        self.inv_max = len(self.acquisition_functions)
self.isdynamic = configs.get('cond_meas',[False]*self.inv_max)
self.stage_results = []
self.timer = timer
self.pygor = pygor
self.cond=list(range(1,1+self.inv_max))
self.conditional_idx_list = []
def configure_investigation_sequence(self,configs):
seq_keys = configs["measurement_seq"]
        self.acquisition_functions = []
self.function_configs = []
self.cond_functions = []
for seq_key in seq_keys:
afunc_name = configs[seq_key].get('func','do_nothing')
cfunc_name = configs[seq_key].get('condition','check_nothing')
            self.acquisition_functions += [getattr(measurement_functions,afunc_name)]
self.cond_functions += [getattr(condition_functions,cfunc_name)]
self.function_configs += [configs[seq_key]]
def do_extra_measure(self,params,minc,maxc,**kwags):
self.jump(params)
self.timer.start()
plunger_jump = lambda params:self.jump(params,True)
kwags['pygor'] = self.pygor
anchor_vals = self.check()
results_full = {}
results = []
        all_results = [None] * self.inv_max
for i in range(self.inv_max):
            data = self.acquisition_functions[i](plunger_jump,self.measure,anchor_vals,self.function_configs[i],**kwags)
self.timer.logtime()
check_result,continue_on,meta_info = self.cond_functions[i](data,minc,maxc,self.function_configs[i],**kwags)
            if len(self.stage_results) > 0:
                past_results = np.array(self.stage_results, dtype=float)[:, i]
                if isinstance(self.isdynamic[i], dict):
                    continue_on = bool_cond(check_result,
                                            past_results[~np.isnan(past_results)],
                                            **self.isdynamic[i])
            all_results[i] = check_result
if isinstance(meta_info,dict):
new_kwags = meta_info.get('kwags',None)
if new_kwags is not None:
kwags['last_check'] = new_kwags
results += [[check_result,continue_on,meta_info,data]]
if not continue_on:
break
        self.stage_results += [all_results]
self.timer.stop()
results_full['extra_measure'] = results
results_full['conditional_idx'] = self.cond[i]
results_full['times'] = self.timer.times_list[-1]
return results_full
def bool_cond(score, past, min_thresh=0.0001, min_data=10, quantile=0.85):
    th_score = np.maximum(min_thresh, np.quantile(past, quantile))
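# --- Illustration (not part of the module above) ---
# A self-contained sketch of the quantile gating that `bool_cond` starts to
# set up: a new score is compared against the `quantile`-th quantile of past
# scores, floored at `min_thresh`. The acceptance rule below (pass while too
# few samples, then require score >= threshold) is an assumption, since the
# source truncates after the threshold computation.
import numpy as np

past = np.array([0.2, 0.5, 0.7, 0.9, 0.4])
min_thresh, min_data, quantile = 0.0001, 10, 0.85
th_score = np.maximum(min_thresh, np.quantile(past, quantile))   # ~0.78
passes = True if past.size < min_data else bool(0.8 >= th_score)
print(th_score, passes)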
import numpy as np
from scipy.signal import savgol_filter
from qube.postprocess.dataset import Axis
def create_name(name, suffix=None, prefix=None):
elements = []
if prefix:
elements.append(str(prefix))
elements.append(str(name))
if suffix:
elements.append(str(suffix))
name = '_'.join(elements)
return name
def duplicate_dataset(dataset, suffix=None, prefix=None, custom_name=None):
new_ds = dataset.copy(shallow_copy=False)
if custom_name:
name = custom_name
else:
name = dataset.name
new_ds.name = create_name(name, suffix, prefix)
return new_ds
def remove_dim_in_axes(axes, dim=None):
new_axes = []
if dim is not None:
for si in axes:
ax = si.copy(shallow_copy=False)
            if si.dim > dim:
                ax.dim = si.dim - 1
                new_axes.append(ax)
            elif si.dim < dim:
                new_axes.append(ax)
return new_axes
def histogram1d(dataset, bins=10, range=None, weights=None, density=None):
    ds = dataset.copy()
    ds.name = f'{ds.name}_hist1d'
    # `normed` was removed from np.histogram (NumPy >= 1.24); use `density`.
    hist, bins = np.histogram(
        ds.value,
        bins=bins,
        range=range,
        weights=weights,
        density=density,
    )
bins = bins[:-1] # remove 1 point for ax.plot
axis = Axis(
name=ds.name,
value=bins,
unit=ds.unit,
dim=0,
)
ds.value = hist
ds.unit = 'Counts'
ds.axes = {axis.name: axis}
return ds
def take(dataset, indices, axis=None):
ds = dataset.copy()
ds.name = f'{ds.name}_take'
ds.value = np.take(ds.value, indices=indices, axis=axis)
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
if axis is not None:
for si in old_axes:
ax = si.copy(shallow_copy=False)
if si.dim == axis:
                ax.value = np.take(ax.value, indices=indices)
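# --- Illustration (not part of the module above) ---
# The `take` helper mirrors `np.take` on the data while slicing the matching
# axis so that coordinates stay in sync. `Dataset`/`Axis` are project classes,
# so this analogue uses bare numpy arrays.
import numpy as np

value = np.arange(12).reshape(3, 4)   # data with two dims
axis0 = np.array([10., 20., 30.])     # coordinate values along dim 0
indices = [0, 2]

new_value = np.take(value, indices=indices, axis=0)
new_axis0 = np.take(axis0, indices=indices)   # keep the axis in sync
assert new_value.shape == (2, 4) and new_axis0.tolist() == [10., 30.]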
from abc import abstractmethod
import numpy as np
import torch
from codes.c_models.base_model import RNNModel
from codes.d_agents.a0_base_agent import BaseAgent
from codes.e_utils import replay_buffer
from codes.e_utils.common_utils import float32_preprocessor
from codes.e_utils.names import RLAlgorithmName
class OffPolicyAgent(BaseAgent):
"""
Abstract Agent interface
"""
def __init__(self, worker_id, action_shape, params, device):
super(OffPolicyAgent, self).__init__(worker_id, action_shape, params, device)
if hasattr(self.params, "PER_PROPORTIONAL") and self.params.PER_PROPORTIONAL:
self.buffer = replay_buffer.PrioritizedReplayBuffer(
experience_source=None, buffer_size=self.params.REPLAY_BUFFER_SIZE,
n_step=self.params.N_STEP, beta_start=0.4, beta_frames=self.params.MAX_GLOBAL_STEP
)
elif hasattr(self.params, "PER_RANK_BASED") and self.params.PER_RANK_BASED:
self.buffer = replay_buffer.RankBasedPrioritizedReplayBuffer(
experience_source=None, buffer_size=self.params.REPLAY_BUFFER_SIZE,
params=self.params, alpha=0.7, beta_start=0.5, beta_frames=self.params.MAX_GLOBAL_STEP
)
else:
self.buffer = replay_buffer.ExperienceReplayBuffer(
experience_source=None, buffer_size=self.params.REPLAY_BUFFER_SIZE
)
def train_off_policy(self, step_idx, local_step_idx):
train_results = self.on_train(step_idx, local_step_idx)
self.training_steps += 1
return train_results
@abstractmethod
def on_train(self, step_idx, local_step_idx):
raise NotImplementedError
def unpack_batch(self, batch):
state, actions, rewards, dones, last_states, agent_states = [], [], [], [], [], []
if isinstance(self.model, RNNModel):
actor_hidden_states = []
if self.params.RL_ALGORITHM in [RLAlgorithmName.DDPG_V0]:
critic_hidden_states = []
critic_1_hidden_states = None
critic_2_hidden_states = None
elif self.params.RL_ALGORITHM in [RLAlgorithmName.TD3_V0]:
critic_hidden_states = None
critic_1_hidden_states = []
critic_2_hidden_states = []
else:
raise ValueError()
else:
actor_hidden_states = critic_hidden_states = critic_1_hidden_states = critic_2_hidden_states = None
for exp in batch:
state.append(np.array(exp.state, copy=False))
actions.append(exp.action)
rewards.append(exp.reward)
dones.append(exp.last_state is None)
if exp.last_state is None:
last_states.append(exp.state) # the result will be masked anyway
else:
                last_states.append(np.array(exp.last_state, copy=False))
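# --- Illustration (not part of the module above) ---
# A self-contained sketch of the `unpack_batch` idea: terminal transitions
# (last_state is None) are flagged as done and their last_state is replaced
# by the current state, since the bootstrapped value is masked out later.
# The `Experience` tuple here is hypothetical.
import numpy as np
from collections import namedtuple

Experience = namedtuple('Experience', ['state', 'action', 'reward', 'last_state'])
batch = [
    Experience(np.zeros(3), 0, 1.0, np.ones(3)),   # regular transition
    Experience(np.ones(3), 1, -1.0, None),         # terminal transition
]

states, dones, last_states = [], [], []
for exp in batch:
    states.append(np.array(exp.state, copy=False))
    dones.append(exp.last_state is None)
    # For terminal steps, reuse the current state; it is masked anyway.
    last_states.append(np.array(
        exp.state if exp.last_state is None else exp.last_state, copy=False))

print(np.array(dones))   # [False  True]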