prompt
stringlengths 19
879k
| completion
stringlengths 3
53.8k
| api
stringlengths 8
59
|
---|---|---|
import os
import numpy as np
import random
import torch
import torch.utils.data as dataf
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy import io
from sklearn.decomposition import PCA

# ---- parameters ----
DataPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/Houston.mat'
TRPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TRLabel.mat'
TSPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TSLabel.mat'
savepath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/W3-DLSection/HU2013/2DCNN-14.mat'
patchsize = 16  # input spatial size for 2D-CNN
batchsize = 128  # select from [16, 32, 64, 128], the best is 64
EPOCH = 200
LR = 0.001

# ---- load data ----
Data = io.loadmat(DataPath)['Houston'].astype(np.float32)
TrLabel = io.loadmat(TRPath)['TRLabel']
TsLabel = io.loadmat(TSPath)['TSLabel']

# Half patch size, used as the symmetric-padding margin.
# Fix: np.int / np.floor(...)+np.int cast were deprecated aliases removed in
# NumPy 1.24; integer floor division gives the same value directly.
pad_width = patchsize // 2

# normalization method 2: map each spectral band to zero mean and unit std
[m, n, l] = np.shape(Data)
for i in range(l):
    band = Data[:, :, i]
    # NOTE(review): assumes no band is constant (std == 0) -- would divide by zero.
    Data[:, :, i] = (band - np.mean(band)) / np.std(band)

x = Data

# boundary interpolation: pad every band symmetrically by pad_width pixels
x2 = np.empty((m + 2 * pad_width, n + 2 * pad_width, l), dtype='float32')
for i in range(l):
    x2[:, :, i] = np.pad(x[:, :, i], pad_width, 'symmetric')

# construct the training set: one (l, patchsize, patchsize) patch per labeled pixel
[ind1, ind2] = np.where(TrLabel != 0)
TrainNum = len(ind1)
TrainPatch = np.empty((TrainNum, l, patchsize, patchsize), dtype='float32')
TrainLabel = np.empty(TrainNum)
ind3 = ind1 + pad_width  # row indices shifted into the padded image
ind4 = ind2 + pad_width  # col indices shifted into the padded image
for i in range(len(ind1)):
    # Even patchsize: the window spans [center - pad_width, center + pad_width),
    # i.e. exactly patchsize rows/cols.
    patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
    # Reorder from (H, W, C) to channel-first (C, H, W) for the CNN.
    patch = np.reshape(patch, (patchsize * patchsize, l))
    patch = np.transpose(patch)
    patch = np.reshape(patch, (l, patchsize, patchsize))
    TrainPatch[i, :, :, :] = patch
    TrainLabel[i] = TrLabel[ind1[i], ind2[i]]

# construct the testing set (this line was garbled in the dump; reconstructed
# to mirror the training-set extraction above)
[ind1, ind2] = np.where(TsLabel != 0)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .rpn_msr.proposal_layer import ProposalLayer
from .rpn_msr.anchor_target_layer import AnchorTargerLayer
from rpn_msr.proposal_target_layer import ProposalTargetLayer
from .network import vgg16, Conv2d, np_to_tensor, FC, smooth_l1_loss
from roi_pooling.modules.roi_pool import RoIPool
from .fastrcnn.bbox_transform import bbox_transform_inv, clip_boxes
from .fastrcnn.nms_wrapper import nms
from PIL import Image
from torchvision import transforms
import logging
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
def nms_detections(pred_boxes, scores, nms_thresh, inds=None):
    """Apply non-maximum suppression to scored boxes.

    Stacks scores onto the boxes as a 5th column, runs ``nms`` with the
    given IoU threshold, and returns the surviving boxes and scores
    (plus the filtered ``inds`` when provided).
    """
    stacked = np.hstack((pred_boxes, scores[:, np.newaxis])).astype(np.float32)
    keep = nms(stacked, nms_thresh)
    if inds is not None:
        return pred_boxes[keep], scores[keep], inds[keep]
    return pred_boxes[keep], scores[keep]
class RPN(nn.Module):
    """Region Proposal Network; shares its VGG16 backbone with the detector.

    For every feature-map location and anchor it predicts an objectness
    (bg/fg) score and 4 box-regression offsets, then converts them into
    region proposals via ``proposal_layer``.

    Attributes
    ----------
    anchor_scales : list
        Anchor scales used at each feature-map location.
    _feat_stride : list
        Total stride of the backbone feature map (16 for VGG16 conv5).
    """
    _feat_stride = [16, ]
    anchor_scales = [4, 8, 16, 32]

    def __init__(self):
        super(RPN, self).__init__()
        self.features = vgg16()
        self.features = nn.DataParallel(self.features)
        self.conv1 = nn.DataParallel(Conv2d(512, 512, 3, same_padding=True))
        # len(anchor_scales) * 3 aspect ratios = anchors per location;
        # 2 channels per anchor for bg/fg scores, 4 for box offsets.
        self.score_conv = nn.DataParallel(Conv2d(
            512, len(self.anchor_scales) * 3 * 2, 1, relu=False))
        self.bbox_conv = nn.DataParallel(Conv2d(
            512, len(self.anchor_scales) * 3 * 4, 1, relu=False))
        self.anchor_target_layer = AnchorTargerLayer(
            self._feat_stride, self.anchor_scales)
        self.proposal_layer = ProposalLayer(
            self._feat_stride, self.anchor_scales)

    def _computer_forward(self, im_data):
        """Run the backbone and both RPN heads.

        Parameters
        ----------
        im_data : :class:`torch.Tensor`
            Batch of images.

        Returns
        -------
        (features, rpn_bbox_pred, rpn_cls_score)
            Backbone feature map, per-anchor box offsets, and per-anchor
            fg/bg scores.
        """
        features = self.features(im_data)  # (N, 512, W, H)
        rpn_conv1 = self.conv1(features)  # (N, 512, W, H)
        # rpn score
        rpn_cls_score = self.score_conv(rpn_conv1)  # (N, A * 2, W, H)
        # rpn boxes
        rpn_bbox_pred = self.bbox_conv(rpn_conv1)
        return features, rpn_bbox_pred, rpn_cls_score

    def forward(self,
                im_data,
                im_info, gt_boxes=None, gt_boxes_index=[]):
        """Compute features, proposals, and (in training) anchor targets.

        NOTE(review): the mutable default ``gt_boxes_index=[]`` is shared
        across calls; it is only forwarded to sub-layers here, so it is
        left unchanged to preserve the external interface.

        Returns
        -------
        tuple
            (features, rois, rpn_cls_prob_reshape, rpn_bbox_pred, target);
            ``target`` is None outside training.
        """
        im_data = im_data.to(torch.device('cuda'))
        features, rpn_bbox_pred, rpn_cls_score = self._computer_forward(
            im_data)
        batch_size = features.shape[0]
        # Fold the per-anchor score channels so dim 1 holds the (bg, fg)
        # pair, softmax over it, then restore the original layout.
        rpn_cls_score_reshape = rpn_cls_score.view(
            batch_size, 2, -1, rpn_cls_score.shape[-1])  # batch, 2, (A*h), w
        rpn_cls_prob = F.softmax(rpn_cls_score_reshape, dim=1)
        rpn_cls_prob_reshape = rpn_cls_prob.view_as(
            rpn_cls_score)  # back to (batch, A*2, h, w)
        cfg_key = 'TRAIN' if self.training else 'TEST'
        rois = self.proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred,
                                   im_info, cfg_key)
        if self.training:
            assert gt_boxes is not None
            # Build anchor classification / regression targets from GT boxes.
            target = self.anchor_target_layer(
                rpn_cls_score, gt_boxes, gt_boxes_index, im_info)
            # self.cross_entropy, self.loss_box = self.build_loss(
            #     rpn_cls_score_reshape, rpn_bbox_pred, target)
        else:
            target = None
        return features, rois, rpn_cls_prob_reshape, rpn_bbox_pred, target

    @staticmethod
    def build_loss(rpn_cls_score_reshape, rpn_bbox_pred, target):
        """RPN classification + box-regression loss from anchor targets."""
        # classification loss
        rpn_cls_score = rpn_cls_score_reshape.permute(
            0, 2, 3, 1).contiguous().view(-1, 2)  # batch * h * w * a , 2
        rpn_label = target[0].permute(0, 2, 3, 1).contiguous().view(-1)
        # Keep only anchors labeled 0/1; label -1 means "ignore".
        rpn_keep = torch.tensor(
            rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
        rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
        rpn_label = torch.index_select(rpn_label, 0, rpn_keep)
        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)
        # box loss
        rpn_bbox_targets, rpn_bbox_inside_weights, bbox_outside_weights = target[1:]
        rpn_loss_box = smooth_l1_loss(
            rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, bbox_outside_weights, sigma=3.0, dim=[1, 2, 3])
        return rpn_cross_entropy, rpn_loss_box

    def predict_rois(self, im_data, im_info):
        """Run the network in eval mode and return only the proposals.

        Fix: ``forward`` returns a 5-tuple, so the original
        ``_, rois = self(...)`` raised ValueError on unpacking.
        """
        self.eval()
        _, rois, _, _, _ = self(im_data, im_info)
        return rois
class FastRCNN(nn.Module):
"""docstring for FasterRCNN
Attributes
----------
bbox_fc : TYPE
Description
classes : TYPE
Description
cross_entropy : TYPE
Description
debug : TYPE
Description
fc6 : TYPE
Description
fc7 : TYPE
Description
loss_box : TYPE
Description
MAX_SIZE : int
Description
n_classes : TYPE
Description
proposal_target_layer : TYPE
Description
roi_pool : TYPE
Description
rpn : TYPE
Description
SCALES : tuple
Description
score_fc : TYPE
Description
"""
SCALES = (600, )
MAX_SIZE = 1000
    def __init__(self, classes, debug=False):
        """Build the detector.

        Parameters
        ----------
        classes : sequence
            Class names; its length fixes the number of output classes.
        debug : bool, optional
            Stored on the instance; not used within this file.
        """
        super(FastRCNN, self).__init__()
        assert classes is not None
        self.classes = classes
        self.n_classes = len(classes)
        # self.features = vgg16()
        # The RPN owns the shared VGG16 backbone.
        self.rpn = RPN()
        self.proposal_target_layer = ProposalTargetLayer(self.n_classes)
        # 7x7 RoI pooling on the conv map; 1/16 matches the backbone stride.
        self.roi_pool = RoIPool(7, 7, 1.0 / 16)
        self.fc6 = nn.DataParallel(FC(512 * 7 * 7, 4096))
        self.fc7 = nn.DataParallel(FC(4096, 4096))
        self.score_fc = nn.DataParallel(FC(4096, self.n_classes, relu=False))
        # Per-class box refinement: 4 offsets for each class.
        self.bbox_fc = nn.DataParallel(
            FC(4096, self.n_classes * 4, relu=False))
        self.debug = debug
    def forward(self, im_data, im_info, gt_boxes=None, gt_boxes_index=[]):
        """Full detection forward pass.

        Runs the RPN, (in training) replaces the raw proposals with sampled
        proposal targets, RoI-pools the backbone features, and applies the
        classification and box-regression heads.

        NOTE(review): the mutable default ``gt_boxes_index=[]`` is shared
        across calls; it is only forwarded here, so it is left as-is.

        Returns
        -------
        tuple
            (cls_prob, bbox_pred, rois, cls_score, target,
            rpn_cls_prob_reshape, rpn_bbox_pred, rpn_target);
            ``target`` is None outside training.
        """
        features, rois, rpn_cls_prob_reshape, rpn_bbox_pred, rpn_target = self.rpn(
            im_data, im_info, gt_boxes, gt_boxes_index)
        if self.training:
            # Sample fg/bg proposals and build head targets from ground truth;
            # target[0] holds the sampled rois actually fed to the head.
            target = self.proposal_target_layer(
                rois, gt_boxes, gt_boxes_index)
            rois = target[0]
        else:
            target = None
        # (N, 5) rows -- presumably [batch_idx, x1, y1, x2, y2]; confirm
        # against the RoIPool implementation.
        rois = rois.reshape(-1, 5).type(torch.FloatTensor).to(torch.device("cuda"))
        # Roi pool
        pooled_features = self.roi_pool(features, rois)
        x = pooled_features.view(pooled_features.size()[0], -1)
        x = self.fc6(x)
        x = F.dropout(x, training=self.training)
        x = self.fc7(x)
        x = F.dropout(x, training=self.training)
        cls_score = self.score_fc(x)
        cls_prob = F.softmax(cls_score, dim=1)
        bbox_pred = self.bbox_fc(x)
        return cls_prob, bbox_pred, rois, cls_score, target, rpn_cls_prob_reshape, rpn_bbox_pred, rpn_target
    @staticmethod
    def build_loss(cls_score, bbox_pred, target):
        """Detection-head loss: weighted cross-entropy + smooth-L1 box loss.

        ``target`` comes from the proposal target layer: target[1] holds the
        per-roi class labels, target[2:] the regression targets and weights.
        """
        label = target[1].squeeze()
        fg_cnt = torch.sum(label.data.ne(0))
        bg_cnt = label.data.numel() - fg_cnt
        # Down/up-weight the background class (index 0) by the fg/bg ratio.
        # NOTE(review): divides by bg_cnt -- assumes at least one background
        # sample in the batch; verify against the sampler's guarantees.
        ce_weights = torch.ones(cls_score.size()[1])
        ce_weights[0] = float(fg_cnt.item()) / bg_cnt.item()
        ce_weights = ce_weights.cuda()
        cross_entropy = F.cross_entropy(cls_score, label, weight=ce_weights)
        bbox_targets, bbox_inside_weights, bbox_outside_weights = target[2:]
        loss_box = smooth_l1_loss(
            bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, dim=[1])
        return cross_entropy, loss_box
def interpret(self, cls_prob, bbox_pred, rois, im_info, im_shape, nms=True, clip=True, min_score=0.0):
# find class
scores, inds = cls_prob.data.max(1)
scores, inds = scores.cpu().numpy(), inds.cpu().numpy()
keep = np.where((inds > 0) & (scores >= min_score))
scores, inds = scores[keep], inds[keep]
# Apply bounding-box regression deltas
keep = keep[0]
box_deltas = bbox_pred.data.cpu().numpy()[keep]
box_deltas = np.asarray([
box_deltas[i, (inds[i] * 4): (inds[i] * 4 + 4)] for i in range(len(inds))
], dtype=np.float)
boxes = rois.data.cpu().numpy()[keep, 1:5]
if len(boxes) == 0:
return | np.array([]) | numpy.array |
"""Split-Mix Federated Learning"""
import sys, os, argparse, copy, time
import numpy as np
import wandb
from tqdm import tqdm
import torch
from torch import nn, optim
from torch.nn.modules.batchnorm import _NormBase
# federated
from federated.learning import train_slimmable, test, fed_test_model, refresh_bn, test_dbn
# model and data
from nets.models import ScalableModule
from nets.slimmable_models import EnsembleNet, EnsembleSubnet
# utils
from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, \
MultiStepLR, LocalMaskCrossEntropyLoss, str2bool
from utils.config import CHECKPOINT_ROOT
# NOTE import desired federation
from federated.core import SplitFederation as Federation, AdversaryCreator
def render_run_name(args, exp_folder):
    """Build a unique run name from args; also prepare the save folder.

    Side effects: resolves ``args.model == 'default'`` to a dataset-specific
    model name and sets ``args.save_path`` (creating the directory).

    Returns (run_name, SAVE_FILE) where SAVE_FILE is the checkpoint path.
    """
    if args.model == 'default':
        default_by_data = {'Digits': 'ens_digit', 'Cifar10': 'ens_preresnet18',
                           'DomainNet': 'ens_alex'}
        args.model = default_by_data[args.data]
    pieces = [f'{args.model}', Federation.render_run_name(args)]
    # Append a tag for every non-default argument.
    if args.seed != 1:
        pieces.append(f'__seed_{args.seed}')
    # opt
    if args.lr_sch != 'none':
        pieces.append(f'__lrs_{args.lr_sch}')
    if args.opt != 'sgd':
        pieces.append(f'__opt_{args.opt}')
    if args.batch != 32:
        pieces.append(f'__batch_{args.batch}')
    if args.wk_iters != 1:
        pieces.append(f'__wk_iters_{args.wk_iters}')
    # slimmable
    if args.no_track_stat:
        pieces.append(f"__nts")
    # split-mix
    if not args.rescale_init:
        pieces.append('__nri')
    if not args.rescale_layer:
        pieces.append('__nrl')
    if args.loss_temp != 'none':
        pieces.append(f'__lt{args.loss_temp}')
    if args.lbn:
        pieces.append('__lbn')
    # adv train
    if args.adv_lmbd > 0:
        pieces.append(f'__at{args.adv_lmbd}')
    run_name = ''.join(pieces)
    args.save_path = os.path.join(CHECKPOINT_ROOT, exp_folder)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    return run_name, os.path.join(args.save_path, run_name)
def get_model_fh(data, model, atom_slim_ratio):
    """Resolve a (data, model) pair to a model constructor.

    NOTE(review): the ensemble branches read ``args.rescale_init``,
    ``args.rescale_layer`` and ``args.no_track_stat`` from the module-level
    ``args`` rather than from parameters -- confirm before reusing this
    function outside the __main__ script below.

    Returns a callable ``ModelClass(**kwargs)`` producing the model.
    """
    # FIXME Only use EnsembleNet or Slimmable model.
    if data == 'Digits':
        if model in ['digit']:
            from nets.slimmable_models import SlimmableDigitModel
            # TODO remove. Function the same as ens_digit
            ModelClass = SlimmableDigitModel
        elif model == 'ens_digit':
            from nets.models import DigitModel
            ModelClass = lambda **kwargs: EnsembleNet(
                base_net=DigitModel, atom_slim_ratio=atom_slim_ratio,
                rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)
        else:
            raise ValueError(f"Invalid model: {model}")
    elif data in ['DomainNet']:
        if model in ['alex']:
            from nets.slimmable_models import SlimmableAlexNet
            ModelClass = SlimmableAlexNet
        elif model == 'ens_alex':
            from nets.models import AlexNet
            ModelClass = lambda **kwargs: EnsembleNet(
                base_net=AlexNet, atom_slim_ratio=atom_slim_ratio,
                rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)
        else:
            raise ValueError(f"Invalid model: {model}")
    elif data == 'Cifar10':
        if model in ['preresnet18']:  # From heteroFL
            from nets.HeteFL.slimmable_preresne import resnet18
            ModelClass = resnet18
        elif model in ['ens_preresnet18']:
            # Pick the resnet variant that matches the BN-tracking setting.
            if args.no_track_stat:
                # FIXME remove on release
                from nets.HeteFL.preresne import resnet18
            else:
                from nets.HeteFL.preresnet import resnet18
            ModelClass = lambda **kwargs: EnsembleNet(
                base_net=resnet18, atom_slim_ratio=atom_slim_ratio,
                rescale_init=args.rescale_init, rescale_layer=args.rescale_layer, **kwargs)
        else:
            raise ValueError(f"Invalid model: {model}")
    else:
        raise ValueError(f"Unknown dataset: {data}")
    return ModelClass
def fed_test(fed, running_model, verbose, adversary=None, val_mix_model=None):
    """Validate every client at every validation slim ratio.

    Relies on module-level globals defined in the __main__ block below:
    ``val_loaders``, ``loss_fun``, ``device``. Logs per-client and
    per-ratio accuracies to wandb as a side effect.

    Returns
    -------
    (list, float)
        Per-client validation accuracy (measured at the LAST slim ratio
        iterated) and the mean validation loss over all clients/ratios.
    """
    # 's' marks standard (clean) accuracy, 'r' robust (adversarial) accuracy.
    mark = 's' if adversary is None else 'r'
    val_acc_list = [None for _ in range(fed.client_num)]
    val_loss_mt = AverageMeter()
    slim_val_acc_mt = {slim_ratio: AverageMeter() for slim_ratio in fed.val_slim_ratios}
    for client_idx in range(fed.client_num):
        # Pull the client's personalized weights into the shared model.
        fed.download(running_model, client_idx)
        for i_slim_ratio, slim_ratio in enumerate(fed.val_slim_ratios):
            # Load and set slim ratio
            if isinstance(running_model, EnsembleNet):
                running_model.switch_slim_mode(slim_ratio)
                val_mix_model = running_model
            else:
                # FIXME ad-hoc for SlimmableNet
                running_model.switch_slim_mode(1.0)  # full net should load the full net
                val_mix_model.full_net.load_state_dict(running_model.state_dict())
                val_mix_model.set_total_slim_ratio(slim_ratio)
            # Test: dual-BN models need the dedicated test routine.
            if running_model.bn_type.startswith('d'):
                val_loss, val_acc = test_dbn(val_mix_model, val_loaders[client_idx], loss_fun, device,
                                             adversary=adversary, att_BNn=True, detector='gt')
            else:
                val_loss, val_acc = test(val_mix_model, val_loaders[client_idx], loss_fun, device,
                                         adversary=adversary)
            # Log
            val_loss_mt.append(val_loss)
            val_acc_list[client_idx] = val_acc  # NOTE only record the last slim_ratio.
            if verbose > 0:
                print(' {:<19s} slim {:.2f}| Val {:s}Loss: {:.4f} | Val {:s}Acc: {:.4f}'.format(
                    'User-' + fed.clients[client_idx] if i_slim_ratio == 0 else ' ', slim_ratio,
                    mark.upper(), val_loss, mark.upper(), val_acc))
            wandb.log({
                f"{fed.clients[client_idx]} sm{slim_ratio:.2f} val_s-acc": val_acc,
            }, commit=False)
            # Also log under the client's own maximum width.
            if slim_ratio == fed.user_max_slim_ratios[client_idx]:
                wandb.log({
                    f"{fed.clients[client_idx]} val_{mark}-acc": val_acc,
                }, commit=False)
            slim_val_acc_mt[slim_ratio].append(val_acc)
    # Average accuracy per slim ratio across clients (None if never measured).
    slim_val_acc_dict = {k: mt.avg if len(mt) > 0 else None for k, mt in slim_val_acc_mt.items()}
    wandb.log({
        f"slim{k:.2f} val_sacc": acc for k, acc in slim_val_acc_dict.items()
    }, commit=False)
    return val_acc_list, val_loss_mt.avg
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
np.seterr(all='raise') # make sure warning are raised as exception
parser = argparse.ArgumentParser()
# basic problem setting
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--data', type=str, default='Digits', help='data name')
parser.add_argument('--model', type=str.lower, default='default', help='model name')
parser.add_argument('--no_track_stat', action='store_true', help='disable BN tracking')
parser.add_argument('--test_refresh_bn', action='store_true', help='refresh BN before test')
# control
parser.add_argument('--no_log', action='store_true', help='disable wandb log')
parser.add_argument('--test', action='store_true', help='test the pretrained model')
parser.add_argument('--resume', action='store_true', help='resume training from checkpoint')
parser.add_argument('--verbose', type=int, default=0, help='verbose level: 0 or 1')
# federated
Federation.add_argument(parser)
# optimization
parser.add_argument('--lr', type=float, default=1e-2, help='learning rate')
parser.add_argument('--lr_sch', type=str, default='none', help='learning rate schedule')
parser.add_argument('--opt', type=str.lower, default='sgd', help='optimizer')
parser.add_argument('--iters', type=int, default=300, help='#iterations for communication')
parser.add_argument('--wk_iters', type=int, default=1, help='#epochs in local train')
# slimmable test
parser.add_argument('--test_slim_ratio', type=float, default=1.,
help='slim_ratio of model at testing.')
parser.add_argument('--sort_bases', action='store_true', help='sort base models by val acc.')
# split-mix
parser.add_argument('--rescale_init', type=str2bool, default=True, help='rescale init after slim')
parser.add_argument('--rescale_layer', type=str2bool, default=True, help='rescale layer outputs after slim')
parser.add_argument('--loss_temp', type=str, default='none',
help='temper cross-entropy loss (str|float):'
' auto - set temp as the width scale; none - no temper; '
'other float values.')
parser.add_argument('--lbn', type=str2bool, default=False, help='use client-local BN stats (valid if tracking stats)')
# adversarial train
parser.add_argument('--adv_lmbd', type=float, default=0.,
help='adv coefficient in [0,1]; default 0 for standard training.')
parser.add_argument('--test_noise', choices=['none', 'LinfPGD'], default='none')
parser.add_argument('--test_adv_lmbd', type=float, default=0.)
args = parser.parse_args()
set_seed(args.seed)
# set experiment files, wandb
exp_folder = f'SplitMix_{args.data}'
run_name, SAVE_FILE = render_run_name(args, exp_folder)
wandb.init(group=run_name[:120], project=exp_folder, mode='offline' if args.no_log else 'online',
config={**vars(args), 'save_file': SAVE_FILE})
# /////////////////////////////////
# ///// Fed Dataset and Model /////
# /////////////////////////////////
fed = Federation(args.data, args)
# Data
train_loaders, val_loaders, test_loaders = fed.get_data()
mean_batch_iters = int(np.mean([len(tl) for tl in train_loaders]))
print(f" mean_batch_iters: {mean_batch_iters}")
# Model
ModelClass = get_model_fh(args.data, args.model, args.atom_slim_ratio)
running_model = ModelClass(
track_running_stats=not args.no_track_stat or (args.test and args.test_refresh_bn), num_classes=fed.num_classes,
bn_type='dbn' if 0. < args.adv_lmbd < 1. else 'bn',
slimmable_ratios=fed.train_slim_ratios,
).to(device)
# mixed model for validation.
val_mix_model = running_model if isinstance(running_model, EnsembleNet) \
else EnsembleSubnet(copy.deepcopy(running_model), args.atom_slim_ratio)
# adversary
if args.adv_lmbd > 0. or args.test:
assert isinstance(running_model, EnsembleNet), "Did not create adv for val_mix_model"
make_adv = AdversaryCreator(args.test_noise if args.test else 'LinfPGD')
adversary = make_adv(running_model)
else:
adversary = None
# Loss
if args.pu_nclass > 0: # niid
loss_fun = LocalMaskCrossEntropyLoss(fed.num_classes)
else:
loss_fun = nn.CrossEntropyLoss()
# Use running model to init a fed aggregator
fed.make_aggregator(running_model, local_bn=args.lbn)
# /////////////////
# //// Resume /////
# /////////////////
# log the best for each model on all datasets
best_epoch = 0
best_acc = [0. for j in range(fed.client_num)]
train_elapsed = [[] for _ in range(fed.client_num)]
start_epoch = 0
if args.resume or args.test:
if os.path.exists(SAVE_FILE):
print(f'Loading chkpt from {SAVE_FILE}')
checkpoint = torch.load(SAVE_FILE)
best_epoch, best_acc = checkpoint['best_epoch'], checkpoint['best_acc']
train_elapsed = checkpoint['train_elapsed']
start_epoch = int(checkpoint['a_iter']) + 1
fed.model_accum.load_state_dict(checkpoint['server_model'])
print('Resume training from epoch {} with best acc:'.format(start_epoch))
for client_idx, acc in enumerate(best_acc):
print(' Best user-{:<10s}| Epoch:{} | Val Acc: {:.4f}'.format(
fed.clients[client_idx], best_epoch, acc))
else:
if args.test:
raise FileNotFoundError(f"Not found checkpoint at {SAVE_FILE}")
else:
print(f"Not found checkpoint at {SAVE_FILE}\n **Continue without resume.**")
# ///////////////
# //// Test /////
# ///////////////
if args.test:
wandb.summary[f'best_epoch'] = best_epoch
# wandb.summary[f'per_epoch_train_elapsed'] = np.sum([np.mean(client_ts) for client_ts in train_elapsed])
# val to select base models
if args.sort_bases and isinstance(running_model, EnsembleNet):
base_accs = []
print(f"Evaluate base models..")
for base_idx in tqdm(range(fed.num_base), file=sys.stdout):
running_model.switch_slim_mode(fed.args.atom_slim_ratio, base_idx)
val_acc = fed_test_model(fed, running_model, val_loaders, loss_fun, device)
base_accs.append(val_acc)
print(f" Base Accs: {', '.join([f'{a:.3f}' for a in base_accs])}")
base_idxs = np.argsort(base_accs)[::-1]
print(f" Sorted base indexes: {base_idxs}")
running_model.base_idxs = base_idxs
# fed.download()
# Set up model with specified width
print(f" Test model: {args.model} x{args.test_slim_ratio} lmbd{args.test_adv_lmbd}"
+ ('' if args.test_noise == 'none' else f' with {args.test_noise} noise'))
assert args.atom_slim_ratio > 0, "When ensemble, the atom ratio has to be defined by" \
f" args.slim_ratio > 0. But got {args.atom_slim_ratio}"
print(f" Ensemble {int(args.test_slim_ratio / args.atom_slim_ratio)} "
f"{args.atom_slim_ratio} base nets")
if not isinstance(running_model, EnsembleNet):
assert args.adv_lmbd == 0, "Not create adversary for EnsembleSubnet."
running_model.switch_slim_mode(1.)
test_model = EnsembleSubnet(running_model, subnet_ratio=args.atom_slim_ratio,
ensemble_num=int(
args.test_slim_ratio / args.atom_slim_ratio))
else:
running_model.switch_slim_mode(args.test_slim_ratio)
test_model = running_model
# Test on clients
if isinstance(running_model, EnsembleNet):
print(f"### current slice: {running_model.current_slice()}")
test_acc_mt = AverageMeter()
for test_idx, test_loader in enumerate(test_loaders):
fed.download(running_model, test_idx, strict=not args.test_refresh_bn)
if running_model.bn_type.startswith('d'):
_, test_acc = test_dbn(test_model, test_loader, loss_fun, device,
adversary=adversary,
detector='clean', # FIXME does this really matter?
att_BNn=True, # args.te_att_BNn, # FIXME we shall remove this since we will attack the mixed output.
adversary_name=args.test_noise,
mix_dual_logit_lmbd=args.test_adv_lmbd,
attack_mix_dual_logit_lmbd=args.test_adv_lmbd,
deep_mix=True,
)
else:
if args.test_refresh_bn:
# test_model.base_net.rescale_layer = False
def set_rescale_layer_and_bn(m):
if isinstance(m, ScalableModule):
m.rescale_layer = False
if isinstance(m, _NormBase):
m.reset_running_stats()
m.momentum = None
test_model.apply(set_rescale_layer_and_bn)
for ep in tqdm(range(20), desc='refresh bn', leave=False):
refresh_bn(test_model, train_loaders[test_idx], device)
_, test_acc = test(test_model, test_loader, loss_fun, device, adversary=adversary)
print(' {:<11s}| Test Acc: {:.4f}'.format(fed.clients[test_idx], test_acc))
wandb.summary[f'{fed.clients[test_idx]} test acc'] = test_acc
test_acc_mt.append(test_acc)
# Profile model FLOPs, sizes (#param)
from nets.profile_func import profile_model
flops, params = profile_model(test_model, device=device)
wandb.summary['GFLOPs'] = flops / 1e9
wandb.summary['model size (MB)'] = params / 1e6
print('GFLOPS: %.4f, model size: %.4fMB' % (flops / 1e9, params / 1e6))
print(f"\n Average Test Acc: {test_acc_mt.avg}")
wandb.summary[f'avg test acc'] = test_acc_mt.avg
wandb.finish()
exit(0)
# ////////////////
# //// Train /////
# ////////////////
# LR scheduler
if args.lr_sch == 'cos':
lr_sch = CosineAnnealingLR(args.iters, eta_max=args.lr, last_epoch=start_epoch)
elif args.lr_sch == 'multi_step':
lr_sch = MultiStepLR(args.lr, milestones=[150, 250], gamma=0.1, last_epoch=start_epoch)
elif args.lr_sch == 'multi_step50':
lr_sch = MultiStepLR(args.lr, milestones=[150+50, 250+50], gamma=0.1, last_epoch=start_epoch)
elif args.lr_sch == 'multi_step100':
lr_sch = MultiStepLR(args.lr, milestones=[150+100, 250+100], gamma=0.1, last_epoch=start_epoch)
else:
assert args.lr_sch == 'none', f'Invalid lr_sch: {args.lr_sch}'
lr_sch = None
shift_tr_cnt_mt = [0 for _ in range(fed.num_base)] # count of trained times for each base model
for a_iter in range(start_epoch, args.iters):
# set global lr
global_lr = args.lr if lr_sch is None else lr_sch.step()
wandb.log({'global lr': global_lr}, commit=False)
# ----------- Train Client ---------------
train_loss_mt, train_acc_mt = AverageMeter(), AverageMeter()
print("============ Train epoch {} ============".format(a_iter))
for client_idx in fed.client_sampler.iter():
# (Alg 2) Sample base models defined by shift index.
slim_ratios, slim_shifts = fed.sample_bases(client_idx)
start_time = time.process_time()
# Download global model to local
fed.download(running_model, client_idx)
# (Alg 3) Local Train
if args.opt == 'sgd':
optimizer = optim.SGD(params=running_model.parameters(), lr=global_lr,
momentum=0.9, weight_decay=5e-4)
elif args.opt == 'adam':
optimizer = optim.Adam(params=running_model.parameters(), lr=global_lr)
else:
raise ValueError(f"Invalid optimizer: {args.opt}")
local_iters = mean_batch_iters * args.wk_iters if args.partition_mode != 'uni' \
else len(train_loaders[client_idx]) * args.wk_iters
train_loss, train_acc = train_slimmable(
running_model, train_loaders[client_idx], optimizer, loss_fun, device,
max_iter=local_iters,
slim_ratios=slim_ratios, slim_shifts=slim_shifts, progress=args.verbose > 0,
loss_temp=args.loss_temp,
adversary=adversary, adv_lmbd=args.adv_lmbd, att_BNn=True,
)
# Upload
fed.upload(running_model, client_idx,
max_slim_ratio=max(slim_ratios), slim_bias_idx=slim_shifts)
# Log
client_name = fed.clients[client_idx]
elapsed = time.process_time() - start_time
wandb.log({f'{client_name}_train_elapsed': elapsed}, commit=False)
train_elapsed[client_idx].append(elapsed)
train_loss_mt.append(train_loss), train_acc_mt.append(train_acc)
for slim_shift in slim_shifts:
shift_tr_cnt_mt[slim_shift] += 1
print(f' User-{client_name:<10s} Train | Loss: {train_loss:.4f} |'
f' Acc: {train_acc:.4f} | Elapsed: {elapsed:.2f} s')
wandb.log({
f"{client_name} train_loss": train_loss,
f"{client_name} train_acc": train_acc,
}, commit=False)
# Use accumulated model to update server model
fed.aggregate()
# ----------- Validation ---------------
val_acc_list, val_loss = fed_test(
fed, running_model, args.verbose, val_mix_model=val_mix_model, adversary=None)
if args.adv_lmbd > 0:
print(f' Avg Val SAcc {np.mean(val_acc_list) * 100:.2f}%')
wandb.log({'val_sacc': np.mean(val_acc_list)}, commit=False)
val_racc_list, val_rloss = fed_test(
fed, running_model, args.verbose, val_mix_model=val_mix_model, adversary=adversary)
print(f' Avg Val RAcc {np.mean(val_racc_list) * 100:.2f}%')
wandb.log({'val_racc': np.mean(val_racc_list)}, commit=False)
val_acc_list = [(1-args.adv_lmbd) * sa_ + args.adv_lmbd * ra_
for sa_, ra_ in zip(val_acc_list, val_racc_list)]
val_loss = (1-args.adv_lmbd) * val_loss + args.adv_lmbd * val_rloss
# Log averaged
print(f' [Overall] Train Loss {train_loss_mt.avg:.4f} Acc {train_acc_mt.avg*100:.1f}% '
f'| Val Acc {np.mean(val_acc_list)*100:.2f}%')
wandb.log({
f"train_loss": train_loss_mt.avg,
f"train_acc": train_acc_mt.avg,
f"val_loss": val_loss,
f"val_acc": np.mean(val_acc_list),
}, commit=False)
wandb.log({
f"shift{s} train cnt": cnt for s, cnt in enumerate(shift_tr_cnt_mt)
}, commit=False)
# ----------- Save checkpoint -----------
if np.mean(val_acc_list) > np.mean(best_acc):
best_epoch = a_iter
for client_idx in range(fed.client_num):
best_acc[client_idx] = val_acc_list[client_idx]
if args.verbose > 0:
print(' Best site-{:<10s}| Epoch:{} | Val Acc: {:.4f}'.format(
fed.clients[client_idx], best_epoch, best_acc[client_idx]))
print(' [Best Val] Acc {:.4f}'.format(np.mean(val_acc_list)))
# Save
print(f' Saving the local and server checkpoint to {SAVE_FILE}')
save_dict = {
'server_model': fed.model_accum.state_dict(),
'best_epoch': best_epoch,
'best_acc': best_acc,
'a_iter': a_iter,
'all_domains': fed.all_domains,
'train_elapsed': train_elapsed,
}
torch.save(save_dict, SAVE_FILE)
wandb.log({
f"best_val_acc": | np.mean(best_acc) | numpy.mean |
import numpy as np
import SBCcode as sbc
import os
import re
import time
import copy
import sys
import gc
from SBCcode.AnalysisModules.EventDealer import BuildEventList as bevl
from SBCcode.AnalysisModules.TimingAnalysis import TimingAnalysis as ta
from SBCcode.DataHandling.GetSBCEvent import GetEvent as get_event
from SBCcode.DataHandling.WriteBinary import WriteBinaryNtupleFile as wb
# Input/output locations for the SBC-17 timing reprocessing pass.
datadir = '/bluearc/storage/SBC-17-data'
recondir_pmt = '/pnfs/coupp/persistent/grid_output/SBC-17/output'
recondir_aa = '/pnfs/coupp/persistent/grid_output/SBC-17-T0Test3/output'
recondir_xyz = '/pnfs/coupp/persistent/grid_output/SBC-17/output'
recondir_output = '/pnfs/coupp/persistent/grid_output/SBC-17-T0Test3/output'
# Select runs named like "<digits>_<digits>" that have raw data plus PMT and
# acoustic analysis outputs, and that do not yet have a timing analysis.
# (The chained filters are lazy; runlist is consumed once by the loop below.)
runlist = os.listdir(recondir_pmt)
runlist = filter(lambda fn: (not re.search('^\d+_\d+$', fn) is None) and
                 os.path.isdir(os.path.join(datadir, fn)),
                 runlist)
runlist = filter(lambda fn: os.path.exists(os.path.join(recondir_pmt,
                                                        *[fn, 'PMTpulseAnalysis_' +
                                                          fn + '.bin'])), runlist)
runlist = filter(lambda fn: os.path.exists(os.path.join(recondir_aa,
                                                        *[fn, 'AcousticAnalysis_' +
                                                          fn + '.bin'])), runlist)
runlist = filter(lambda fn: not os.path.exists(os.path.join(recondir_aa,
                                                            *[fn, 'TimingAnalysis_' +
                                                              fn + '.bin'])), runlist)
# Reconstructed bubble positions for all runs; require XYZ coverage per run.
xyz = sbc.read_bin(os.path.join(recondir_xyz, 'SimpleXYZ_all.bin'))
runlist = filter(lambda fn: np.any(np.all(xyz['runid']==np.int32(fn.split('_')),axis=1)), runlist)
# Template TimingAnalysis record used when an event fails to load.
timing_default = ta(None, None, None)
for runname in runlist:
rundir = os.path.join(datadir, runname)
evlist = bevl(rundir)
runid_str = runname.split('_')
runid = np.int32(runid_str)
timing_out = []
pmtpa = None
gc.collect()
try:
pmtpa = sbc.read_bin(os.path.join(recondir_pmt, *[runname, 'PMTpulseAnalysis_' + runname + '.bin']), 5000)
except:
print('could not load pmtpa')
sys.stdout.flush()
continue
aa = sbc.read_bin(os.path.join(recondir_aa, *[runname, 'AcousticAnalysis_' + runname + '.bin']))
for ev in evlist:
t0 = time.time()
print('Starting event ' + runname + '/' + str(ev))
npev = np.array([ev], dtype=np.int32)
try:
thisevent = get_event(rundir, ev, 'fastDAQ')
except:
timing_out.append(copy.deepcopy(timing_default))
timing_out[-1]['runid'] = runid
timing_out[-1]['ev'] = npev
print('Failed to load event, skpping')
sys.stdout.flush()
continue
print('Time to load event: '.rjust(35) +
str(time.time() - t0) + ' seconds')
sys.stdout.flush()
xyzcut = np.all(xyz['runid'] == runid, axis=1) * (xyz['ev'] == npev)
if np.any(xyzcut):
thisxyz = dict(bubZ=xyz['bubZ'][xyzcut][0])
else:
thisxyz = []
print('Whoa, no xyz for this event')
aacut = | np.all(aa['runid'] == runid, axis=1) | numpy.all |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data") # , one_hot=True)
tf.reset_default_graph()
# the model of teacher
def classifier_teacher(x):
reuse = len([t for t in tf.global_variables() if t.name.startswith("teacher")]) > 0
with tf.variable_scope("teacher", reuse=reuse):
x = tf.reshape(x, shape=[-1, 28, 28, 1])
x = slim.conv2d(
x, num_outputs=64, kernel_size=[4, 4], stride=2, activation_fn=leaky_relu
)
x = slim.conv2d(
x, num_outputs=128, kernel_size=[4, 4], stride=2, activation_fn=leaky_relu
)
x = slim.flatten(x)
x = slim.fully_connected(x, num_outputs=1024, activation_fn=leaky_relu)
xx = slim.fully_connected(x, num_outputs=10, activation_fn=None)
x = leaky_relu(xx)
print("cla", x.get_shape())
return x, xx
# the model of student
def classifier_student(x):
reuse = len([t for t in tf.global_variables() if t.name.startswith("student")]) > 0
with tf.variable_scope("student", reuse=reuse):
# x = tf.reshape(x, shape=[-1, 28, 28, 1])
x = slim.flatten(x)
x = slim.fully_connected(x, num_outputs=50, activation_fn=leaky_relu)
x = slim.fully_connected(x, num_outputs=10, activation_fn=leaky_relu)
print("cla_student", x.get_shape())
return x
def classifier_student_copy(x):
reuse = (
len([t for t in tf.global_variables() if t.name.startswith("student_copy")]) > 0
)
with tf.variable_scope("student_copy", reuse=reuse):
# x = tf.reshape(x, shape=[-1, 28, 28, 1])
x = slim.flatten(x)
x = slim.fully_connected(x, num_outputs=80, activation_fn=leaky_relu)
x = slim.fully_connected(x, num_outputs=40, activation_fn=leaky_relu)
x = slim.fully_connected(x, num_outputs=10, activation_fn=leaky_relu)
print("cla_student_copy", x.get_shape())
return x
def classifier_student_copy1(x):
reuse = (
len([t for t in tf.global_variables() if t.name.startswith("student_copy1")]) > 0
)
with tf.variable_scope("student_copy1", reuse=reuse):
# x = tf.reshape(x, shape=[-1, 28, 28, 1])
x = slim.flatten(x)
x = slim.fully_connected(x, num_outputs=80, activation_fn=leaky_relu)
x = slim.fully_connected(x, num_outputs=40, activation_fn=leaky_relu)
x = slim.fully_connected(x, num_outputs=10, activation_fn=leaky_relu)
print("cla_student_copy", x.get_shape())
return x
def leaky_relu(x):
return tf.where(tf.greater(x, 0), x, 0.01 * x)
# knowledge distillation
def softmax_with_temperature(logits, temperature):
x = logits / temperature
expx = np.exp(x).T
sum_exp = np.sum(expx, axis=0)
x = (expx / sum_exp).T
return x
weigth = np.zeros([28, 28])
for i in range(0, 28):
for j in range(0, 28):
weigth[i][j] = np.sqrt(np.sqrt(np.square(i - 13.5) + np.square(j - 13.5)))
mask_weight = np.reshape(weigth, (-1, 784))
mask_weight = 5 / mask_weight
n_input = 784
classes_dim = 10
triggle_dim = 5
n_input = 784
classes_dim = 10
triggle_dim = 5
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.int64, [None])
yy = tf.placeholder(tf.float32, [None, 10])
yin = tf.concat([tf.one_hot(y, depth=classes_dim)], 0)
target_label = tf.ones([triggle_dim], tf.int64)
with tf.variable_scope("t_trigger"):
trigger = tf.Variable(
np.random.rand(n_input).astype(dtype=np.float32), dtype=tf.float32
)
mask = tf.Variable(np.random.rand(n_input).astype(dtype=np.float32), dtype=tf.float32)
apply_mask = tf.clip_by_value(mask, 0, 1)
apply_trigger = tf.clip_by_value(trigger, 0, 1)
gen_f = tf.reshape(apply_trigger, shape=[-1, 784])
apply_trigger_coupe = tf.tile(gen_f, [triggle_dim, 1])
input_trigger = (1 - apply_mask) * x[0:triggle_dim] + apply_mask * apply_trigger_coupe
y_class = tf.concat([tf.one_hot(target_label, depth=classes_dim)], 0)
apply_test_trigger = tf.tile(gen_f, [1000, 1])
input_trigger_test = (1 - apply_mask) * x + apply_mask * apply_test_trigger
yin_test = tf.ones([1000], tf.int64)
y_class_test = tf.concat([tf.one_hot(yin_test, depth=1000)], 0)
tea_logit, tea_soft = classifier_teacher(x)
stu_logit = classifier_student(x)
tea_trigger, _ = classifier_teacher(input_trigger)
tea_tri_test, _ = classifier_teacher(input_trigger_test)
stu_trigger = classifier_student(input_trigger)
stu_tri_test = classifier_student(input_trigger_test)
stu_clean = classifier_student_copy(x)
stu_tri_copy = classifier_student_copy(input_trigger)
stu_tri = classifier_student_copy(input_trigger_test)
stu_clean_1 = classifier_student_copy1(x)
stu_tri_1 = classifier_student_copy1(input_trigger_test)
# test accuracy
accuracy_tea = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(tea_logit, 1), y), tf.float32))
accuracy_stu = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(stu_logit, 1), y), tf.float32))
accuracy_tea_tri = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(tea_tri_test, 1), yin_test), tf.float32)
)
accuracy_stu_tri = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(stu_tri_test, 1), yin_test), tf.float32)
)
accuracy_stu_copy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(stu_clean, 1), y), tf.float32)
)
accuracy_stu_tri_copy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(stu_tri, 1), yin_test), tf.float32)
)
accuracy_stu_copy_1 = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(stu_clean, 1), y), tf.float32)
)
accuracy_stu_tri_copy_1 = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(stu_tri, 1), yin_test), tf.float32)
)
# the loss function
loss_teacher = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=tea_logit, labels=yin)
) + tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=tea_trigger, labels=y_class)
)
loss_tea_stu_tir = (
tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=tea_trigger, labels=y_class)
)
+ tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_trigger, labels=y_class)
)
+ tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_tri_copy, labels=y_class)
)
)
loss_student = 0.8 * tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_logit / 30, labels=yy)
) + 0.2 * tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_logit, labels=yin)
)
loss_trigger = tf.reduce_sum(tf.square(mask))
loss_student_copy = 0.8 * tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_clean / 30, labels=yy)
) + 0.2 * tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_clean, labels=yin)
)
loss_student_copy_1 = 0.8 * tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_clean_1 / 30, labels=yy)
) + 0.2 * tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=stu_clean_1, labels=yin)
)
# various
t_vars = tf.trainable_variables()
student_vars_copy_1 = [var for var in t_vars if "student_copy1" in var.name]
student_vars_copy = [var for var in t_vars if "student_copy" in var.name]
student_vars = [var for var in t_vars if "student" in var.name]
teacher_vars = [var for var in t_vars if "teacher" in var.name]
trigger_vars = [var for var in t_vars if "t_trigger" in var.name]
gen_global_step = tf.Variable(0, trainable=False)
global_step = tf.train.get_or_create_global_step()
# the optimizer
train_teacher = tf.train.AdamOptimizer(0.004).minimize(
loss_teacher, var_list=teacher_vars, global_step=global_step
)
train_student = tf.train.AdamOptimizer(0.001).minimize(
loss_student, var_list=student_vars, global_step=gen_global_step
)
train_tea_stu = tf.train.AdamOptimizer(0.0010).minimize(
loss_tea_stu_tir, var_list=teacher_vars, global_step=gen_global_step
)
train_trigger_min = tf.train.AdamOptimizer(0.0029).minimize(
loss_trigger, var_list=trigger_vars, global_step=global_step
)
train_tri_stu_tea = tf.train.AdamOptimizer(0.0035).minimize(
loss_tea_stu_tir, var_list=trigger_vars, global_step=global_step
)
train_student_copy = tf.train.AdamOptimizer(0.001).minimize(
loss_student_copy, var_list=student_vars_copy, global_step=gen_global_step
)
train_student_copy_1 = tf.train.AdamOptimizer(0.001).minimize(
loss_student_copy_1, var_list=student_vars_copy_1, global_step=gen_global_step
)
tf.global_variables_initializer()
with tf.train.MonitoredTrainingSession(
checkpoint_dir="log/mnist_mask050101" "_dynamic_and_static", save_checkpoint_secs=60
) as sess:
batch_size = 100
train_epoch = 25
total_batch = int(mnist.train.num_examples / batch_size)
print(
"global_step.eval(session=sess)",
global_step.eval(session=sess),
int(global_step.eval(session=sess) / total_batch),
)
for epoch in range(int(global_step.eval(session=sess) / total_batch), train_epoch):
for i in range(0, total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
feed = {x: batch_x, y: batch_y}
if epoch <= 4:
loss_tea, ys, _, _ = sess.run(
[loss_teacher, tea_soft, train_teacher, train_tea_stu], feed
)
else:
loss_tea, ys, _ = sess.run([loss_teacher, tea_soft, train_tea_stu], feed)
loss_tri, loss_tri_all, _, _ = sess.run(
[loss_trigger, loss_tea_stu_tir, train_tri_stu_tea, train_trigger_min],
feed,
)
if i % 2 == 0:
batch_ys = softmax_with_temperature(ys, 30)
loss_stu, _ = sess.run(
[loss_student, train_student],
feed_dict={x: batch_x, yy: batch_ys, y: batch_y},
)
_, stu_cop = sess.run(
[train_student_copy, loss_student_copy],
feed_dict={x: batch_x, yy: batch_ys, y: batch_y},
)
if i % 40 == 0:
print("epoch:", epoch, "iter:", i)
print(
"teacher_loss:",
loss_tea,
" student_loss:",
loss_stu,
" student_loss_copy:",
stu_cop,
"trigger_size:",
loss_tri,
"all_tri_loss:",
loss_tri_all,
)
print(
"accuracy_teacher",
accuracy_tea.eval(
{x: mnist.test.images[0:1000], y: mnist.test.labels[0:1000]}, session=sess
),
)
print(
"accuracy_teacher_tri",
accuracy_tea_tri.eval({x: mnist.test.images[0:1000]}, session=sess),
)
print(
"accuracy_student",
accuracy_stu.eval(
{x: mnist.test.images[0:1000], y: mnist.test.labels[0:1000]}, session=sess
),
)
print(
"accuracy_student_tri",
accuracy_stu_tri.eval({x: mnist.test.images[0:1000]}, session=sess),
)
print(
"accuracy_copy",
accuracy_stu_copy.eval(
{x: mnist.test.images[0:1000], y: mnist.test.labels[0:1000]}, session=sess
),
)
print(
"accuracy_copy_tri",
accuracy_stu_tri_copy.eval({x: mnist.test.images[0:1000]}, session=sess),
)
# mask_end,trigger_end = sess.run([mask,trigger])
# xa,ya = mnist.train.next_batch(2)
# mask_end_to = np.clip(mask_end, 0, 1)
# trigger_end_to = np.clip(trigger_end, 0, 1)
# trigger_end_too = np.reshape(trigger_end_to,(-1, 784))
# pict = mask_end_to*trigger_end_too + (1-mask_end_to)*xa[0]
# f, a = plt.subplots(3, 3, figsize=(5, 5))
# for i in range(1):
# a[0][i].imshow(np.reshape(xa[i], (28, 28)))
# a[1][i].imshow(np.reshape(pict, (28, 28)))
# a[2][i].imshow(np.reshape(mask_end_to*trigger_end_to,(28,28)))
# plt.draw()
# if epoch>10:
# plt.savefig('./trigger_mask1'+str(epoch)+'.png')
# plt.show()
mask_end, trigger_end = sess.run([mask, trigger])
xa, ya = mnist.train.next_batch(2)
mask_end_to = np.clip(mask_end, 0, 1)
trigger_end_to = np.clip(trigger_end, 0, 1)
trigger_end_too = np.reshape(trigger_end_to, (-1, 784))
pict = mask_end_to * trigger_end_too + (1 - mask_end_to) * xa[0]
f, a = plt.subplots(3, 3, figsize=(5, 5))
for i in range(1):
a[0][i].imshow(np.reshape(xa[i], (28, 28)))
a[1][i].imshow(np.reshape(pict, (28, 28)))
a[2][i].imshow( | np.reshape(mask_end_to * trigger_end_to, (28, 28)) | numpy.reshape |
import numpy as np
from scipy import stats
def qqplot(data, dist=stats.distributions.norm, binom_n=None):
"""
qqplot of the quantiles of x versus the ppf of a distribution.
Parameters
----------
data : array-like
1d data array
dist : scipy.stats.distribution or string
Compare x against dist. Strings aren't implemented yet. The default
is scipy.stats.distributions.norm
Returns
-------
matplotlib figure.
Examples
--------
>>> import scikits.statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.Load()
>>> data.exog = sm.add_constant(data.exog)
>>> mod_fit = sm.OLS(data.endog, data.exog).fit()
>>> res = mod_fit.resid
>>> std_res = (res - res.mean())/res.std()
Import qqplots from the sandbox
>>> from scikits.statsmodels.sandbox.graphics import qqplot
>>> qqplot(std_res)
>>> plt.show()
Notes
-----
Only the default arguments currently work. Depends on matplotlib.
"""
try:
from matplotlib import pyplot as plt
except:
raise ImportError("matplotlib not installed")
if isinstance(dist, str):
raise NotImplementedError
names_dist = {}
names_dist.update({"norm_gen" : "Normal"})
plotname = names_dist[dist.__class__.__name__]
x = np.array(data, copy=True)
x.sort()
nobs = x.shape[0]
prob = | np.linspace(1./(nobs-1), 1-1./(nobs-1), nobs) | numpy.linspace |
#!/usr/bin/env python
"""Module with class for single video"""
__author__ = '<NAME>'
__date__ = 'August 2018'
from collections import Counter
import numpy as np
import math as m
import os
from os.path import join
from ute.utils.arg_pars import opt
from ute.utils.logging_setup import logger
from ute.utils.util_functions import dir_check
from ute.viterbi_utils.viterbi_w_lenth import Viterbi
from ute.viterbi_utils.grammar import Grammar, SingleTranscriptGrammar
from ute.viterbi_utils.length_model import PoissonModel
class Video(object):
"""Single video with respective for the algorithm parameters"""
def __init__(self, path, K, *, gt=[], name='', start=0, with_bg=False, frame_sampling=1):
"""
Args:
path (str): path to video representation
K (int): number of subactivities in current video collection
reset (bool): necessity of holding features in each instance
gt (arr): ground truth labels
gt_with_0 (arr): ground truth labels with SIL (0) label
name (str): short name without any extension
start (int): start index in mask for the whole video collection
"""
self.iter = 0
self.path = path
self._K = K
self.name = name
self._frame_sampling = frame_sampling
self._likelihood_grid = None
self._valid_likelihood = None
self._theta_0 = 0.1
self._subact_i_mask = np.eye(self._K)
self.n_frames = 0
self._features = None
self.global_start = start
self.global_range = None
self.gt = gt
self._gt_unique = np.unique(self.gt)
self.features()
self._check_gt()
# counting of subactivities np.array
self.a = np.zeros(self._K)
# ordering, init with canonical ordering
self._pi = list(range(self._K))
self.inv_count_v = np.zeros(self._K - 1)
# subactivity per frame
self._z = []
self._z_idx = []
self._init_z_framewise()
# temporal labels
self.temp = None
self._init_temporal_labels()
# background
self._with_bg = with_bg
self.fg_mask = np.ones(self.n_frames, dtype=bool)
if self._with_bg:
self._init_fg_mask()
self._subact_count_update()
self.segmentation = {'gt': (self.gt, None)}
def features(self):
"""Load features given path if haven't do it before"""
if self._features is None:
if opt.ext == 'npy':
self._features = np.load(self.path)
else:
self._features = np.loadtxt(self.path)
######################################
# fixed.order._coffee_mlp_!pose_full_vae0_time10.0_epochs60_embed20_n1_!ordering_gmm1_one_!gt_lr0.0001_lr_zeros_b0_v1_l0_c1_.pth.tar
# if self._features.shape[-1] == 65 and opt.feature_dim == 64:
# self._features = self._features[1:, 1:]
# np.savetxt(self.path, self._features)
# if opt.data_type == 0 and opt.dataset == 'fs':
# self._features = self._features.T
if opt.f_norm: # normalize features
mask = np.ones(self._features.shape[0], dtype=bool)
for rdx, row in enumerate(self._features):
if np.sum(row) == 0:
mask[rdx] = False
z = self._features[mask] - | np.mean(self._features[mask], axis=0) | numpy.mean |
"""Module to prepare evaluation metrics results and its visual comparisons
Created on 2020-04-28
File: metrics.py
...
@author: <NAME>
"""
from sklearn.metrics import mean_squared_error
from skimage.measure import compare_ssim
from numpy.linalg import norm
import numpy as np
import matplotlib.pyplot as plt
def calculate_metrics(coh_3vg,coh):
"""
Function to calculate MSE and SSIM
Inputs:
coh_3vg: Ground truth of coherence
coh: Estimated coherence
Returns:
mses: Mean Squared error
rmse: Root mean-squared error
ssim: Structural Similarity Index
"""
mses=mean_squared_error(coh_3vg,coh)
print("MSE", mses)
print("RMSE",np.sqrt(mses))
(score, diff) = compare_ssim(np.array(coh_3vg), np.array(coh), full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
return mses, | np.sqrt(mses) | numpy.sqrt |
import sys
import pickle
import numpy as np
from PIL import Image
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
from datetime import datetime
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
startTime = datetime.now()
np.set_printoptions(threshold=sys.maxsize)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def read_data(img1):
''' helper function to make reading in DEMs easier '''
# this is the original DEM
if img1 == "original":
# img1 = Image.open('D:/01_anaktuvuk_river_fire/00_working/01_processed-data/00_study-area'
# '/li-dem_1m_sa3_fill.tif')
img1 = Image.open('D:/01_anaktuvuk_river_fire/00_working/01_processed-data/00_study-area/bens_data'
'/ben_2009_DTM_1m_small-sa.tif')
img1 = np.array(img1)
# this is the microtopo image:
if img1 == "detrended":
# img1 = Image.open('D:/01_anaktuvuk_river_fire/00_working/01_processed-data/02_microtopography'
# '/awi_2019_DTM_1m_reproj_300x300_02_microtopo_16m.tif')
img1 = Image.open("D:/01_anaktuvuk_river_fire/00_working/01_processed-data/02_microtopography/"
"ben_2009_DTM_1m_small-sa_detrended_16m.tif")
img1 = np.array(img1)
return img1
def inner(key, val, out_key):
''' fits a gaussian to every transect
height profile and adds transect parameters
to the dictionary.
:param key: coords of trough pixel
(determines center of transect)
:param val: list of transect heights,
coords, and directionality/type
:param out_key: current edge with (s, e)
:return val: updated val with:
- val[5] = fwhm_gauss --> transect width
- val[6] = mean_gauss --> transect depth
- val[7] = cod_gauss --> r2 of fit
'''
# implement the gaussian function
def my_gaus(x, a, mu, sigma):
return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
# check if there's a transect to fit in the first place
# (some transects at the image edge/corner might be empty) --> but there are none
if len(val[0]) != 0:
# flip the transect along x-axis to be able to fit the Gaussian
data = val[0] * (-1) + np.max(val[0])
N = len(data) # number of data points (corresponds to width*2 + 1)
# diagonal transects are sqrt(2) times longer than straight transects
if val[2] == "diagonal":
t = np.linspace(0, (len(data)) * np.sqrt(2), N)
else:
t = np.linspace(0, len(data) - 1, N)
# provide initial guesses for the mean and sigma for fitting
mean = np.argmax(data) # mean is estimated to be at the maximum point of the flipped transect
# (lowest point within the trough)
sigma = np.sqrt(sum(data * (t - mean) ** 2) / N) + 1 # estimate for sigma is determined via the underlying data
# + 1 to avoid division by 0 for flat transects
# now fit the Gaussian & raise error for those that can't be fitted
try:
gauss_fit = curve_fit(my_gaus, t, data, p0=[1, mean, sigma], maxfev=500000,
bounds=[(-np.inf, -np.inf, 0.01), (np.inf, np.inf, 8.5)])
except RuntimeError:
print('RuntimeError is raised with edge: {0} coords {1} and elevations: {2}'.format(out_key, key, val))
# pass
try:
# recreate the fitted curve using the optimized parameters
data_gauss_fit = my_gaus(t, *gauss_fit[0])
# and finally get depth and width and r2 of fit for adding to original dictionary (val)
max_gauss = np.max(data_gauss_fit)
fwhm_gauss = 2 * np.sqrt(2 * np.log(2)) * abs(gauss_fit[0][2])
cod_gauss = r2_score(data, data_gauss_fit)
# append the parameters to val
val.append(fwhm_gauss)
val.append(max_gauss)
val.append(cod_gauss)
plotting=True
if key[0]==15 and key[1]==610:
plt.plot(t, data, '+:', label='DTM elevation', color='darkslategrey')
plt.plot(t, data_gauss_fit, color='lightseagreen',
label='fitted Gaussian')
# , d={0}, w={1}, r2={2}'.format(round(max_gauss, 2),
# round(fwhm_gauss, 2),
# round(cod_gauss, 2)
plt.legend(frameon=False)
plt.ylabel("depth below ground [m]")
plt.xlabel("transect length [m]")
plt.xticks(np.arange(9), np.arange(1, 10))
plt.text(0, 0.25, f'trough width: {round(fwhm_gauss, 2)} m', fontsize=8)
plt.text(0, 0.235, f'trough depth: {round(max_gauss, 2)} m', fontsize=8)
plt.text(0, 0.22, f'$r^2$ of fit: {round(cod_gauss, 2)}', fontsize=8)
# plt.title("direction: {0}, category: {1}".format(val[2], val[3]))
plt.savefig('./figures/fitted_to_coords_{0}_{1}.png'.format(key[0], key[1]), dpi=300)
plt.close()
except:
# bad error handling:
if val[4]:
print("a water-filled trough can't be fitted: edge: {}".format(out_key))
else:
print("something seriously wrong")
else:
print(val)
def outer(out_key, inner_dict):
''' iterate through all transects of a
single trough and send to inner()
where gaussian will be fitted.
:param out_key: current edge with (s, e)
:param inner_dict: dict of transects with:
- inner_keys: pixel-coords of trough pixels (x, y)
inbetween (s, e).
- inner_values: list with transect coordinates
and info on directionality/type
:return inner_dict: updated inner_dict with old
inner_values + transect width, height, r2 in val
'''
all_keys = []
all_vals_upd = []
# iterate through all transects of a trough
for key, val in inner_dict.items():
try:
# fit gaussian to all transects
val_upd = inner(key, val, out_key)
all_keys.append(key)
all_vals_upd.append(val_upd)
except ValueError as err:
print('{0} -- {1}'.format(out_key, err))
# recombine keys and vals to return the updated dict
inner_dict = dict(zip(all_keys, all_vals_upd))
return inner_dict
def fit_gaussian_parallel(dict_soil):
'''iterate through edges of the graph (in dict
form) and send each trough to a free CPU core
--> prepare fitting a Gaussian function
to the extracted transects in dict_soil
for parallel processing: each trough will
be handled by a single CPU core, but different
troughs can be distributed to multiple cores.
:param dict_soil: a dictionary with
- outer_keys: edge (s, e) and
- outer_values: dict of transects
with:
- inner_keys: pixel-coords of trough pixels (x, y)
inbetween (s, e).
- inner_values: list with transect coordinates and info:
- [0]: height information of transect at loc (xi, yi)
- [1]: pixel coordinates of transect (xi, yi)
--> len[1] == width*2 + 1
- [2]: directionality of transect
- [3]: transect scenario (see publication)
- [4]: presence of water
:return dict_soil2: updated dict soil
same as dict_soil with added:
- inner_values:
- val[5] = fwhm_gauss --> transect width
- val[6] = mean_gauss --> transect depth
- val[7] = cod_gauss --> r2 of fit
'''
all_outer_keys = []
# parallelize into n_jobs different jobs/CPU cores
out = Parallel(n_jobs=20)(delayed(outer)(out_key, inner_dict) for out_key, inner_dict in dict_soil.items())
# get all the outer_keys
for out_key, inner_dict in dict_soil.items():
all_outer_keys.append(out_key)
# and recombine them with the updated inner_dict
dict_soil2 = dict(zip(all_outer_keys, out))
return dict_soil2
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def get_trough_avgs_gauss(transect_dict_fitted):
''' gather all width/depth/r2 parameters of
each transect and compute mean/median
parameter per trough. Add mean/median per
trough to the dict.
this part is mainly preparation for the
later network_analysis(.py).
:param transect_dict_fitted:
:return mean_trough_params: a copy of the
transect_dict_fitted with added mean trough
parameters to the outer dict as values.
'''
mean_trough_params = {}
empty_edges = []
# iterate through all edges/troughs
for edge, trough in transect_dict_fitted.items():
num_trans_tot = len(trough) # get the total number of transects in one edge/trough
gaus_width_sum = []
gaus_depth_sum = []
gaus_r2_sum = []
num_trans_cons = 0
water = 0
# check if an edge/trough is empty
if trough != {}:
# then iterate through all transects of the current edge/trough
for coords, trans in trough.items():
# filter out all transects that:
# a) are not between 0 m and 15 m in width (unrealistic values)
# b) have been fitted with r2 <= 0.8
# c) likely have water present
if not isinstance(trans, list):
pass
# now count number of water-filled transects per trough
elif trans[4]:
water += 1
# pass
elif len(trans[0]) != 0 and 0 < trans[5] < 15 and trans[7] > 0.8 and not trans[4]:
# append the parameters from "good" transects to the lists
gaus_width_sum.append(trans[5])
gaus_depth_sum.append(trans[6])
gaus_r2_sum.append(trans[7])
num_trans_cons += 1
# to then calculate the mean/median for each parameter
gaus_mean_width = np.mean(gaus_width_sum)
gaus_median_width = np.median(gaus_width_sum)
gaus_mean_depth = np.mean(gaus_depth_sum)
gaus_median_depth = np.median(gaus_depth_sum)
gaus_mean_r2 = np.mean(gaus_r2_sum)
gaus_median_r2 = np.median(gaus_r2_sum)
# ratio of "good" transects considered for mean/median params compared to all transects available
perc_trans_cons = np.round(num_trans_cons/num_trans_tot, 2)
perc_water_fill = np.round(water/len(trough), 2)
# add all the mean/median parameters to the inner_dict
mean_trough_params[edge] = [gaus_mean_width, gaus_median_width,
gaus_mean_depth, gaus_median_depth,
gaus_mean_r2, gaus_median_r2,
perc_trans_cons, perc_water_fill]
# and if the trough is empty, append the edge to the list of empty edges
else:
empty_edges.append(edge)
# print(transect_dict_fitted[edge])
# print('empty edges ({0} in total): {1}'.format(len(empty_edges), empty_edges))
return mean_trough_params
def plot_param_hists_box_width(transect_dict_orig_fitted_09, transect_dict_orig_fitted_19):
''' plot and save histogram and boxplot
of all transect widths distribution for
two points in time and for all vs.
filtered results.
:param transect_dict_orig_fitted_09:
dictionary of 2009 situation
:param transect_dict_orig_fitted_19:
dictionary of 2019 situation
:return: plot with hist and boxplot
'''
all_widths_09 = []
hi_widths_09 = []
for edge, inner_dic in transect_dict_orig_fitted_09.items():
for skel_pix, trans_info in inner_dic.items():
# print(trans_info)
if -30 < trans_info[5] < 30:
all_widths_09.append(np.abs(trans_info[5]))
if trans_info[7] > 0.8:
hi_widths_09.append(np.abs(trans_info[5]))
all_widths_19 = []
hi_widths_19 = []
for edge, inner_dic in transect_dict_orig_fitted_19.items():
for skel_pix, trans_info in inner_dic.items():
# print(trans_info)
if -30 < trans_info[5] < 30:
all_widths_19.append(np.abs(trans_info[5]))
if trans_info[7] > 0.8:
hi_widths_19.append(np.abs(trans_info[5]))
# print(f'all widths: \t 2009: {len(all_widths_09)} \t 2019: {len(all_widths_19)}')
# print(f'hi widths: \t 2009: {len(hi_widths_09)} \t 2019: {len(hi_widths_19)}')
print("WIDTH")
print("r2 > 0.8")
print(f'median width: \t 2009: {np.median(hi_widths_09)} \t 2019: {np.median(hi_widths_19)}')
print(f'mean width: \t 2009: {np.mean(hi_widths_09)} \t 2019: {np.mean(hi_widths_19)}')
print(f'min width: \t 2009: {np.min(hi_widths_09)} \t 2019: {np.min(hi_widths_19)}')
print(f'max width: \t 2009: {np.max(hi_widths_09)} \t 2019: {np.max(hi_widths_19)}')
print(f'std width: \t 2009: {np.std(hi_widths_09)} \t 2019: {np.std(hi_widths_19)}')
print("all r2")
print(f'median width: \t 2009: {np.median(all_widths_09)} \t 2019: {np.median(all_widths_19)}')
print(f'mean width: \t 2009: {np.mean(all_widths_09)} \t 2019: {np.mean(all_widths_19)}')
print(f'min width: \t 2009: {np.min(all_widths_09)} \t 2019: {np.min(all_widths_19)}')
print(f'max width: \t 2009: {np.max(all_widths_09)} \t 2019: {np.max(all_widths_19)}')
print(f'std width: \t 2009: {np.std(all_widths_09)} \t 2019: {np.std(all_widths_19)}')
print("______________________________________________________________")
# do the plotting
boxplotprops_09 = {'patch_artist': True,
'boxprops': dict(facecolor='salmon'),
'flierprops': dict(marker='o', markerfacecolor='salmon', markersize=0.5, linestyle='none'),
'medianprops': dict(color='salmon')}
boxplotprops_19 = {'patch_artist': True,
'boxprops': dict(facecolor='teal'),
'flierprops': dict(marker='o', markerfacecolor='teal', markersize=0.5, linestyle='none'),
'medianprops': dict(color='teal')}
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(3, 3), dpi=300,
gridspec_kw={'wspace': 0, 'hspace': 0, 'height_ratios': [2, 2, 6]})
# fig.tight_layout()
# axes[0].axis('off')
median_09, q1_09, q3_09 = np.percentile(hi_widths_09, 50), np.percentile(hi_widths_09, 25), np.percentile(
hi_widths_09, 75)
median_19, q1_19, q3_19 = np.percentile(hi_widths_19, 50), np.percentile(hi_widths_19, 25), np.percentile(
hi_widths_19, 75)
# 2009 boxplot
axes[0].boxplot(hi_widths_09, 1, vert=False, widths=0.5, **boxplotprops_09)
axes[0].axvline(median_09, linestyle='--', color='salmon', alpha=.9, linewidth=.9)
# axes[0].axvline(median_19, linestyle='--', color='teal', alpha=.9, linewidth=.9)
axes[0].set_yticks([])
axes[0].set_yticklabels([])
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[0].spines['bottom'].set_visible(False)
axes[0].spines['left'].set_visible(False)
axes[0].set_ylabel('2009', weight='bold')
# 2019 boxplot
axes[1].boxplot(hi_widths_19, 1, vert=False, widths=0.5, **boxplotprops_19)
axes[1].axvline(median_19, linestyle='--', color='teal', alpha=.9, linewidth=.9)
# axes[1].axvline(median_09, linestyle='--', color='salmon', alpha=.9, linewidth=.9)
axes[1].set_yticks([])
axes[1].set_yticklabels([])
axes[1].spines['top'].set_visible(False)
axes[1].spines['right'].set_visible(False)
axes[1].spines['bottom'].set_visible(False)
axes[1].spines['left'].set_visible(False)
axes[1].set_ylabel('2019', weight='bold')
# histogram
# 2009
axes[2].hist(all_widths_09, bins=np.arange(0.0, 20.0, 0.4), range=(0, 20), histtype='step', color='peachpuff',
label=r"width (all)")
axes[2].hist(hi_widths_09, bins=np.arange(0.0, 20.0, 0.4), range=(0, 20), histtype='step', color='salmon',
label=r"width ($r^2 > 0.8$)")
axes[2].axvline(median_09, linestyle='--', color='salmon', alpha=.9, linewidth=.9,
label="median = {0} m".format(np.round(median_09, 2)))
# 2019
axes[2].hist(all_widths_19, bins=np.arange(0.0, 20.0, 0.4), range=(0, 20), histtype='step', color='powderblue',
label=r"width (all)")
axes[2].hist(hi_widths_19, bins=np.arange(0.0, 20.0, 0.4), range=(0, 20), histtype='step', color='teal',
label=r"width ($r^2 > 0.8$)")
axes[2].axvline(median_19, linestyle='--', color='teal', alpha=.9, linewidth=.9,
label="median = {0} m".format(np.round(median_19, 2)))
axes[2].set_ylabel('frequency')
axes[2].set_xlabel('width [m]')
# axes[0].set_title("Trough Widths")
# # prepare legend
# handles, labels = axes[2].get_legend_handles_labels()
# # colors = ['peachpuff', 'salmon', 'salmon', 'powderblue', 'teal', 'teal']
# # lstyles = ['-', '-', '--', '-', '-', '--']
# # item_melting = mlines.Line2D([], [], color=colors, linestyle=lstyles, linewidth=1)
# # handles[0] = item_melting
# order = [2, 3, 0, 4, 5, 1]
# plt.legend([handles[idx] for idx in order], [labels[idx] for idx in order], loc='upper center',
# bbox_to_anchor=(0.775, 0.875), ncol=1, frameon=False, fontsize=9)
# plt.gcf().text(0.565, 0.416, r'2009', fontsize=10, weight='bold', rotation=90)
# plt.gcf().text(0.565, 0.305, r'2019', fontsize=10, weight='bold', rotation=90)
# # axes[0].subplots_adjust(top=0.5)
# # plt.show()
fig.tight_layout()
# plt.savefig('./figures/hist_box_width.png')
def plot_param_hists_box_depth(transect_dict_orig_fitted_09, transect_dict_orig_fitted_19):
''' plot and save histogram and boxplot
of all transect depths distribution for
two points in time and for all vs.
filtered results.
:param transect_dict_orig_fitted_09:
dictionary of 2009 situation
:param transect_dict_orig_fitted_19:
dictionary of 2019 situation
:return: plot with hist and boxplot
'''
all_depths_09 = []
hi_depths_09 = []
for edge, inner_dic in transect_dict_orig_fitted_09.items():
for skel_pix, trans_info in inner_dic.items():
# print(trans_info)
if -30 < trans_info[5] < 30:
all_depths_09.append(trans_info[6])
if trans_info[7] > 0.8:
hi_depths_09.append(trans_info[6])
all_depths_19 = []
hi_depths_19 = []
for edge, inner_dic in transect_dict_orig_fitted_19.items():
for skel_pix, trans_info in inner_dic.items():
# print(trans_info)
if -30 < trans_info[5] < 30:
all_depths_19.append(trans_info[6])
if trans_info[7] > 0.8:
hi_depths_19.append(trans_info[6])
# print(f'all depths: \t 2009: {len(all_depths_09)} \t 2019: {len(all_depths_19)}')
# print(f'hi depths: \t 2009: {len(hi_depths_09)} \t 2019: {len(hi_depths_19)}')
# print(f'median depths: \t 2009: {np.median(hi_depths_09)} \t 2019: {np.median(hi_depths_19)}')
# print(f'mean depths: \t 2009: {np.mean(hi_depths_09)} \t 2019: {np.mean(hi_depths_19)}')
# print(f'min depths: \t 2009: {np.min(hi_depths_09)} \t 2019: {np.min(hi_depths_19)}')
# print(f'max depths: \t 2009: {np.max(hi_depths_09)} \t 2019: {np.max(hi_depths_19)}')
print("DEPTH")
print("r2 > 0.8")
print(f'median depths: \t 2009: {np.median(hi_depths_09)} \t 2019: {np.median(hi_depths_19)}')
print(f'mean depths: \t 2009: {np.mean(hi_depths_09)} \t 2019: {np.mean(hi_depths_19)}')
print(f'min depths: \t 2009: {np.min(hi_depths_09)} \t 2019: {np.min(hi_depths_19)}')
print(f'max depths: \t 2009: {np.max(hi_depths_09)} \t 2019: {np.max(hi_depths_19)}')
print(f'std depths: \t 2009: {np.std(hi_depths_09)} \t 2019: {np.std(hi_depths_19)}')
print("all r2")
print(f'median depths: \t 2009: {np.median(all_depths_09)} \t 2019: {np.median(all_depths_19)}')
print(f'mean depths: \t 2009: {np.mean(all_depths_09)} \t 2019: {np.mean(all_depths_19)}')
print(f'min depths: \t 2009: {np.min(all_depths_09)} \t 2019: {np.min(all_depths_19)}')
print(f'max depths: \t 2009: {np.max(all_depths_09)} \t 2019: {np.max(all_depths_19)}')
print(f'std depths: \t 2009: {np.std(all_depths_09)} \t 2019: {np.std(all_depths_19)}')
print("______________________________________________________________")
# do the plotting
boxplotprops_09 = {'patch_artist': True,
'boxprops': dict(facecolor='salmon'),
'flierprops': dict(marker='o', markerfacecolor='salmon', markersize=0.5, linestyle='none'),
'medianprops': dict(color='salmon')}
boxplotprops_19 = {'patch_artist': True,
'boxprops': dict(facecolor='teal'),
'flierprops': dict(marker='o', markerfacecolor='teal', markersize=0.5, linestyle='none'),
'medianprops': dict(color='teal')}
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(3, 3), dpi=300,
gridspec_kw={'wspace': 0, 'hspace': 0, 'height_ratios': [2, 2, 6]})
# fig.tight_layout()
# axes[0].axis('off')
median_09, q1_09, q3_09 = np.percentile(hi_depths_09, 50), | np.percentile(hi_depths_09, 25) | numpy.percentile |
import math
import magent
import numpy as np
# --- rendering configuration constants ---
animation_total = 2   # animation interpolation frames per environment step
animation_stop = 0    # extra frames to hold after an animation completes
background_rgb = (255, 255, 255)  # white canvas background
attack_line_rgb = (0, 0, 0)       # colour of attacker -> target lines
attack_dot_rgb = (0, 0, 0)        # colour of the attack-impact dot
attack_dot_size = 0.3             # impact-dot size as a fraction of one grid cell
text_rgb = (0, 0, 0)
text_size = 16        # font size used for self.text_formatter
text_spacing = 3
banner_size = 20      # font size of the top banner (group counts)
banner_spacing = 3    # vertical gap between banner lines
bigscreen_size = 72   # font size used for self.bigscreen_formatter
bigscreen_spacing = 0
grid_rgba = ((0, 0, 0), 30)  # gridline colour and alpha
grid_size = 8         # pixels per map cell (resolution is map_size * 8)
def draw_line(surface, color, a, b):
    """Draw a straight line on *surface* from point ``a`` to point ``b``.

    Both endpoints are given as (x, y) pairs in (possibly fractional) pixel
    coordinates and are snapped to the nearest integer pixel before drawing.
    """
    import pygame
    start = (int(round(a[0])), int(round(a[1])))
    end = (int(round(b[0])), int(round(b[1])))
    pygame.draw.line(surface, color, start, end)
def draw_rect(surface, color, a, w, h):
    """Draw a filled axis-aligned rectangle on *surface*.

    ``a`` is the (x, y) top-left corner in fractional pixel coordinates;
    ``w``/``h`` are the width and height. The rounding remainder of the
    corner is folded back into the size so the rounded rectangle covers
    the same extent as the fractional one.
    """
    import pygame
    left = int(round(a[0]))
    top = int(round(a[1]))
    width = int(round(w + a[0] - round(a[0])))
    height = int(round(h + a[1] - round(a[1])))
    pygame.draw.rect(surface, color, pygame.Rect(left, top, width, height))
def draw_rect_matrix(matrix, color, a, w, h, resolution):
    """Paint a filled rectangle directly into the pixel array *matrix*.

    ``a`` is the (x, y) top-left corner in fractional pixel coordinates,
    ``w``/``h`` the size, and ``resolution`` the (width, height) bounds the
    rectangle is clipped against. ``color`` is broadcast over the last axis.
    """
    x = int(round(a[0]))
    y = int(round(a[1]))
    width = int(round(w + a[0] - round(a[0])))
    height = int(round(h + a[1] - round(a[1])))
    x0, x1 = max(x, 0), min(x + width, resolution[0])
    y0, y1 = max(y, 0), min(y + height, resolution[1])
    matrix[x0:x1, y0:y1, :] = color
def draw_line_matrix(matrix, color, a, b, resolution):
    """Rasterise an axis-aligned segment from ``a`` to ``b`` into *matrix*.

    Endpoints are (x, y) pairs; they are clamped into ``resolution`` and
    rounded to integer pixels. Only horizontal or vertical segments are
    supported — anything diagonal raises NotImplementedError. The span is
    inclusive of both endpoints.
    """
    a = (min(max(0, a[0]), resolution[0] - 1), min(max(0, a[1]), resolution[1] - 1))
    b = (min(max(0, b[0]), resolution[0] - 1), min(max(0, b[1]), resolution[1] - 1))
    # BUG FIX: map() returns a lazy iterator in Python 3, which does not
    # support indexing, so the a[0]/a[1] accesses below raised TypeError.
    # Materialise the rounded coordinates as tuples instead.
    a = (int(round(a[0])), int(round(a[1])))
    b = (int(round(b[0])), int(round(b[1])))
    if a[0] == b[0]:
        # vertical segment: fill the inclusive row span on column a[0]
        if a[1] > b[1]:
            matrix[a[0], b[1]:a[1] + 1] = color
        else:
            matrix[a[0], a[1]:b[1] + 1] = color
    elif a[1] == b[1]:
        # horizontal segment: fill the inclusive column span on row a[1]
        if a[0] > b[0]:
            matrix[b[0]:a[0] + 1, a[1]] = color
        else:
            matrix[a[0]:b[0] + 1, a[1]] = color
    else:
        raise NotImplementedError
class Renderer:
    def __init__(self, env, map_size, mode):
        """Create a renderer for *env*.

        Args:
            env: the MAgent environment to draw; must expose ``get_handles``.
            map_size: side length of the (square) map, in grid cells.
            mode: ``"human"`` for an on-screen pygame window,
                ``"rgb_array"`` for off-screen rendering into a surface.
        """
        import pygame
        self.env = env
        self.mode = mode
        self.handles = self.env.get_handles()
        # 8 px per map cell (matches module-level grid_size) plus 15 px for banners
        base_resolution = (map_size * 8, map_size * 8 + 15)
        if mode == "human":
            pygame.init()
            pygame.display.init()
            infoObject = pygame.display.Info()
            # leave a 50 px margin inside the desktop resolution
            screen_size = (infoObject.current_w - 50, infoObject.current_h - 50)
            # shrink to the screen if the full map would not fit
            self.resolution = resolution = np.min([screen_size, base_resolution], axis=0)
            self.display = pygame.display.set_mode(resolution, pygame.DOUBLEBUF, 0)
            canvas_resolution = (resolution[0], resolution[1])
            self.canvas = pygame.Surface(canvas_resolution)
            pygame.display.set_caption('MAgent Renderer Window')
        elif mode == "rgb_array":
            # headless mode: only fonts are needed; draw into plain surfaces
            pygame.font.init()
            self.resolution = base_resolution
            self.display = pygame.Surface(base_resolution)
            canvas_resolution = (base_resolution[0], base_resolution[1])
            self.canvas = pygame.Surface(canvas_resolution)
        # font renderers for in-map text, the score banner, and large overlays
        self.text_formatter = pygame.font.SysFont(None, text_size, True)
        self.banner_formatter = pygame.font.SysFont(None, banner_size, True)
        self.bigscreen_formatter = pygame.font.SysFont(None, bigscreen_size, True)
        self.map_size = (map_size, map_size)
        self.frame_id = 0
        # previous/current frame data used by render() for animation interpolation
        self.old_data = None
        self.new_data = None
        self.need_static_update = True
        self.animation_progress = 0
def get_banners(self, frame_id, resolution):
groups = self.env._get_groups_info()
def form_txt(index):
handle = self.handles[index]
color = tuple([int(a) for a in groups[index][2:]])
return '{}'.format(np.sum(self.env.get_alive(handle).astype(np.int32))), color
if len(self.handles) == 1:
result = [(form_txt(0), )]
if len(self.handles) == 2:
vs = ' vs ', (0, 0, 0)
result = [(form_txt(0), vs, form_txt(1))]
elif len(self.handles) == 4:
vs = ' vs ', (0, 0, 0)
comma = ', ', (0, 0, 0)
result = [(form_txt(0), comma, form_txt(1), vs, form_txt(2), comma, form_txt(3))]
else:
raise RuntimeError("bad number of handles")
return result
    def close(self):
        # Tear down the display window and release all pygame resources.
        import pygame
        pygame.display.quit()
        pygame.quit()
def render(self, mode):
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
import pygame
env = self.env
self.groups = env._get_groups_info()
resolution = self.resolution
grid_map = np.zeros((resolution[0], resolution[1], 3), dtype=np.int16)
view_position = [self.map_size[0] / 2 * grid_size - resolution[0] / 2,
self.map_size[1] / 2 * grid_size - resolution[1] / 2]
groups = self.groups
banner_formatter = self.banner_formatter
status = True
triggered = False
# x_range: which vertical gridlines should be shown on the display
# y_range: which horizontal gridlines should be shown on the display
x_range = (
max(0, int(math.floor(max(0, view_position[0]) / grid_size))),
min(self.map_size[0], int(math.ceil(max(0, view_position[0] + resolution[0]) / grid_size)))
)
y_range = (
max(0, int(math.floor(max(0, view_position[1]) / grid_size))),
min(self.map_size[1], int(math.ceil(max(0, view_position[1] + resolution[1]) / grid_size)))
)
self.canvas.fill(background_rgb)
self.display.fill(background_rgb)
if self.need_static_update or True:
grids = pygame.Surface(resolution)
grids.fill(background_rgb)
if self.new_data is None or self.animation_progress > animation_total + animation_stop:
pos, event = env._get_render_info(x_range, y_range)
buffered_new_data = pos, event
if buffered_new_data is None:
buffered_new_data = self.new_data
self.old_data = self.new_data
self.new_data = buffered_new_data
self.animation_progress = 0
if self.new_data is not None:
if self.old_data is None and self.animation_progress == 0:
self.animation_progress = animation_total
if self.need_static_update or True:
pygame.pixelcopy.surface_to_array(grid_map, self.canvas)
for wall in env._get_walls_info():
x, y = wall[0], wall[1]
if x >= x_range[0] and x <= x_range[1] and y >= y_range[0] and y <= y_range[1]:
draw_rect_matrix(grid_map, (127, 127, 127),
(x * grid_size - view_position[0], y * grid_size - view_position[1]),
grid_size, grid_size, resolution)
pygame.pixelcopy.array_to_surface(self.canvas, grid_map)
for key in self.new_data[0]:
new_prop = self.new_data[0][key]
new_group = groups[new_prop[2]]
now_prop = new_prop
now_group = new_group
draw_rect(
self.canvas, (int(now_group[2]), int(now_group[3]), int(now_group[4])),
(
now_prop[0] * grid_size - view_position[0],
now_prop[1] * grid_size - view_position[1]
),
now_group[0] * grid_size,
now_group[1] * grid_size
)
for key, event_x, event_y in self.new_data[1]:
if key not in self.new_data[0]:
continue
new_prop = self.new_data[0][key]
new_group = groups[new_prop[2]]
now_prop = new_prop
now_group = new_group
draw_line(
self.canvas, attack_line_rgb,
(
now_prop[0] * grid_size - view_position[0] + now_group[0] / 2 * grid_size,
now_prop[1] * grid_size - view_position[1] + now_group[1] / 2 * grid_size
),
(
event_x * grid_size - view_position[0] + grid_size / 2,
event_y * grid_size - view_position[1] + grid_size / 2
)
)
draw_rect(
self.canvas, attack_dot_rgb,
(
event_x * grid_size - view_position[0] + grid_size / 2 - attack_dot_size * grid_size / 2,
event_y * grid_size - view_position[1] + grid_size / 2 - attack_dot_size * grid_size / 2,
),
attack_dot_size * grid_size,
attack_dot_size * grid_size
)
if status or triggered or self.animation_progress < animation_total + animation_stop:
self.animation_progress += 1
self.display.blit(self.canvas, (0, 7))
height_now = 0
for texts in self.get_banners(self.frame_id, resolution):
content = []
width, height = 0, 0
for text in texts:
text = banner_formatter.render(text[0], True, pygame.Color(*text[1]))
content.append((text, width))
width += text.get_width()
height = max(height, text.get_height())
start = (resolution[0] - width) / 2.0
for b in content:
self.display.blit(b[0], (start + b[1], height_now))
height_now += height + banner_spacing
if self.need_static_update:
self.need_static_update = False
observation = pygame.surfarray.pixels3d(self.display)
new_observation = | np.copy(observation) | numpy.copy |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import unittest
import numpy as np
import pandas as pd
import ROOT
from context import tact
from tact import metrics
np.random.seed(52)
class ECDFTests(unittest.TestCase):
"""
Tests for metrics.ecdf
"""
def setUp(self):
self.a = np.linspace(0, 1, num=10)
np.random.shuffle(self.a)
self.w = | np.random.rand(10) | numpy.random.rand |
from .common import Benchmark
import numpy as np
# Unary ufuncs to benchmark (named "avx" — presumably those with SIMD
# specialisations in numpy's inner loops; confirm against numpy sources).
avx_ufuncs = ['sin',
              'cos',
              'exp',
              'log',
              'sqrt',
              'absolute',
              'reciprocal',
              'square',
              'rint',
              'floor',
              'ceil' ,
              'trunc',
              'frexp',
              'isnan',
              'isfinite',
              'isinf',
              'signbit']
# memory strides and dtype characters ('f'=float32, 'd'=float64) to sweep
stride = [1, 2, 4]
dtype = ['f', 'd']
class AVX_UFunc(Benchmark):
    """Benchmark unary ufuncs over strided float arrays."""
    params = [avx_ufuncs, stride, dtype]
    param_names = ['avx_based_ufunc', 'stride', 'dtype']
    timeout = 10

    def setup(self, ufuncname, stride, dtype):
        np.seterr(all='ignore')
        # skip the benchmark if this numpy build lacks the ufunc
        func = getattr(np, ufuncname, None)
        if func is None:
            raise NotImplementedError()
        self.f = func
        N = 10000
        self.arr = np.ones(stride * N, dtype)

    def time_ufunc(self, ufuncname, stride, dtype):
        self.f(self.arr[::stride])
class AVX_UFunc_log(Benchmark):
    """Benchmark np.log on strided arrays of uniform random samples."""
    params = [stride, dtype]
    param_names = ['stride', 'dtype']
    timeout = 10

    def setup(self, stride, dtype):
        np.seterr(all='ignore')
        N = 10000
        samples = np.random.random_sample(stride * N)
        self.arr = np.array(samples, dtype=dtype)

    def time_log(self, stride, dtype):
        np.log(self.arr[::stride])
# binary ufuncs to benchmark over float inputs
avx_bfuncs = ['maximum',
              'minimum']
class AVX_BFunc(Benchmark):
    """Benchmark binary ufuncs over strided float arrays."""
    params = [avx_bfuncs, dtype, stride]
    param_names = ['avx_based_bfunc', 'dtype', 'stride']
    timeout = 10

    def setup(self, ufuncname, dtype, stride):
        np.seterr(all='ignore')
        # skip the benchmark if this numpy build lacks the ufunc
        func = getattr(np, ufuncname, None)
        if func is None:
            raise NotImplementedError()
        self.f = func
        N = 10000
        self.arr1 = np.array(np.random.rand(stride * N), dtype=dtype)
        self.arr2 = np.array(np.random.rand(stride * N), dtype=dtype)

    def time_ufunc(self, ufuncname, dtype, stride):
        self.f(self.arr1[::stride], self.arr2[::stride])
class AVX_ldexp(Benchmark):
    """Benchmark np.ldexp with a float mantissa and integer exponent array."""
    params = [dtype, stride]
    param_names = ['dtype', 'stride']
    timeout = 10

    def setup(self, dtype, stride):
        np.seterr(all='ignore')
        self.f = np.ldexp
        N = 10000
        self.arr1 = np.array(np.random.rand(stride * N), dtype=dtype)
        self.arr2 = np.array(np.random.rand(stride * N), dtype='i')

    def time_ufunc(self, dtype, stride):
        self.f(self.arr1[::stride], self.arr2[::stride])
# binary arithmetic ufuncs to benchmark on complex inputs
cmplx_bfuncs = ['add',
                'subtract',
                'multiply',
                'divide']
cmplxstride = [1, 2, 4]
cmplxdtype = ['F', 'D']  # 'F'=complex64, 'D'=complex128
class AVX_cmplx_arithmetic(Benchmark):
    """Benchmark complex binary arithmetic ufuncs on strided arrays."""
    params = [cmplx_bfuncs, cmplxstride, cmplxdtype]
    param_names = ['bfunc', 'stride', 'dtype']
    timeout = 10

    def setup(self, bfuncname, stride, dtype):
        np.seterr(all='ignore')
        # skip the benchmark if this numpy build lacks the ufunc
        func = getattr(np, bfuncname, None)
        if func is None:
            raise NotImplementedError()
        self.f = func
        N = 10000
        self.arr1 = np.ones(stride * N, dtype)
        self.arr2 = np.ones(stride * N, dtype)

    def time_ufunc(self, bfuncname, stride, dtype):
        self.f(self.arr1[::stride], self.arr2[::stride])
# unary ufuncs to benchmark on complex inputs
cmplx_ufuncs = ['reciprocal',
                'absolute',
                'square',
                'conjugate']
class AVX_cmplx_funcs(Benchmark):
params = [cmplx_ufuncs, cmplxstride, cmplxdtype]
param_names = ['bfunc', 'stride', 'dtype']
timeout = 10
def setup(self, bfuncname, stride, dtype):
np.seterr(all='ignore')
try:
self.f = getattr(np, bfuncname)
except AttributeError:
raise NotImplementedError()
N = 10000
self.arr1 = | np.ones(stride*N, dtype) | numpy.ones |
# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""structure module"""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore import Parameter
from mindspore import Tensor
from mindspore import ms_function
from mindspore.ops import functional as F
from common import residue_constants
from common.utils import apply_to_point, torsion_angles_to_frames, frames_and_literature_positions_to_atom14_pos,\
pre_compose, scale_translation, to_tensor_new, generate_new_affine, to_tensor, from_tensor, vecs_to_tensor,\
atom14_to_atom37, get_exp_atom_pos, get_exp_frames
from common.utils import _invert_point, lecun_init
class InvariantPointAttention(nn.Cell):
    """Invariant Point Attention module.

    Attention over residues that mixes three logit terms: scalar QK dot
    products, squared distances between query/key 3D points expressed in the
    global frame, and a bias projected from the pair representation.
    """
    def __init__(self, config, global_config, pair_dim):
        """Initialize.
        Args:
            config: Structure Module Config
            global_config: Global Config of Model.
            pair_dim: pair representation dimension.
        """
        super().__init__()
        # numerical floor inside the sqrt of squared point norms below
        self._dist_epsilon = 1e-8
        self.config = config
        self.num_head = config.num_head
        self.num_scalar_qk = config.num_scalar_qk
        self.num_scalar_v = config.num_scalar_v
        self.num_point_v = config.num_point_v
        self.num_point_qk = config.num_point_qk
        self.num_channel = config.num_channel
        # width of the concatenated attention output fed to output_projection:
        # scalar values + 4 point features (x, y, z, norm) + pair features, per head
        self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \
                              self.num_head * pair_dim
        self.global_config = global_config
        self.q_scalar = nn.Dense(config.num_channel, self.num_head * self.num_scalar_qk,
                                 weight_init=lecun_init(config.num_channel)).to_float(mstype.float16)
        self.kv_scalar = nn.Dense(config.num_channel, self.num_head * (self.num_scalar_qk + self.num_scalar_v),
                                  weight_init=lecun_init(config.num_channel)).to_float(mstype.float16)
        # point projections produce x/y/z components interleaved (factor 3)
        self.q_point_local = nn.Dense(config.num_channel, self.num_head * 3 * self.num_point_qk,
                                      weight_init=lecun_init(config.num_channel)
                                      ).to_float(mstype.float16)
        self.kv_point_local = nn.Dense(config.num_channel, self.num_head * 3 * (self.num_point_qk + self.num_point_v),
                                       weight_init=lecun_init(config.num_channel)).to_float(mstype.float16)
        self.soft_max = nn.Softmax()
        self.soft_plus = ops.Softplus()
        # learnable per-head weight of the point-distance term
        # (12 presumably equals config.num_head — TODO confirm)
        self.trainable_point_weights = Parameter(Tensor(np.ones((12,)), mstype.float32), name="trainable_point_weights")
        self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim)).to_float(mstype.float16)
        self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros'
                                          ).to_float(mstype.float16)
        # fixed variance-balancing factors for the three logit terms
        # (16 and 18 presumably derive from num_scalar_qk / num_point_qk — confirm)
        self.scalar_weights = np.sqrt(1.0 / (3 * 16))
        self.point_weights = np.sqrt(1.0 / (3 * 18))
        self.attention_2d_weights = np.sqrt(1.0 / 3)
    def construct(self, inputs_1d, inputs_2d, mask, rotation, translation):
        """Compute geometry-aware attention.
        Args:
            inputs_1d: (N, C) 1D input embedding that is the basis for the
                scalar queries.
            inputs_2d: (N, M, C') 2D input embedding, used for biases and values.
            mask: (N, 1) mask to indicate which elements of inputs_1d participate
                in the attention.
            rotation: describe the orientation of every element in inputs_1d
            translation: describe the position of every element in inputs_1d
        Returns:
            Transformation of the input embedding.
        """
        num_residues, _ = inputs_1d.shape
        # Improve readability by removing a large number of 'self's.
        num_head = self.num_head
        num_scalar_qk = self.num_scalar_qk
        num_point_qk = self.num_point_qk
        num_scalar_v = self.num_scalar_v
        num_point_v = self.num_point_v
        # Construct scalar queries of shape:
        # [num_residues, num_head, num_scalar_qk]
        q_scalar = self.q_scalar(inputs_1d)
        q_scalar = mnp.reshape(q_scalar, [num_residues, num_head, num_scalar_qk])
        # Construct scalar keys/values of shape:
        # [num_target_residues, num_head, num_points]
        kv_scalar = self.kv_scalar(inputs_1d)
        kv_scalar = mnp.reshape(kv_scalar, [num_residues, num_head, num_scalar_v + num_scalar_qk])
        k_scalar, v_scalar = mnp.split(kv_scalar, [num_scalar_qk], axis=-1)
        # Construct query points of shape:
        # [num_residues, num_head, num_point_qk]
        # First construct query points in local frame.
        q_point_local = self.q_point_local(inputs_1d)
        # split the interleaved projection into an xyz-leading axis
        q_point_local = mnp.stack(mnp.split(q_point_local, 3, axis=-1), axis=0)
        # Project query points into global frame.
        q_point_global = apply_to_point(rotation, translation, q_point_local)
        # Reshape query point for later use.
        q_point0 = mnp.reshape(q_point_global[0], (num_residues, num_head, num_point_qk))
        q_point1 = mnp.reshape(q_point_global[1], (num_residues, num_head, num_point_qk))
        q_point2 = mnp.reshape(q_point_global[2], (num_residues, num_head, num_point_qk))
        # Construct key and value points.
        # Key points have shape [num_residues, num_head, num_point_qk]
        # Value points have shape [num_residues, num_head, num_point_v]
        # Construct key and value points in local frame.
        kv_point_local = self.kv_point_local(inputs_1d)
        kv_point_local = mnp.split(kv_point_local, 3, axis=-1)
        # Project key and value points into global frame.
        kv_point_global = apply_to_point(rotation, translation, kv_point_local)
        kv_point_global0 = mnp.reshape(kv_point_global[0], (num_residues, num_head, (num_point_qk + num_point_v)))
        kv_point_global1 = mnp.reshape(kv_point_global[1], (num_residues, num_head, (num_point_qk + num_point_v)))
        kv_point_global2 = mnp.reshape(kv_point_global[2], (num_residues, num_head, (num_point_qk + num_point_v)))
        # Split key and value points.
        k_point0, v_point0 = mnp.split(kv_point_global0, [num_point_qk,], axis=-1)
        k_point1, v_point1 = mnp.split(kv_point_global1, [num_point_qk,], axis=-1)
        k_point2, v_point2 = mnp.split(kv_point_global2, [num_point_qk,], axis=-1)
        # softplus keeps the learned per-head point weights positive
        trainable_point_weights = self.soft_plus(self.trainable_point_weights)
        point_weights = self.point_weights * mnp.expand_dims(trainable_point_weights, axis=1)
        v_point = [mnp.swapaxes(v_point0, -2, -3), mnp.swapaxes(v_point1, -2, -3), mnp.swapaxes(v_point2, -2, -3)]
        q_point = [mnp.swapaxes(q_point0, -2, -3), mnp.swapaxes(q_point1, -2, -3), mnp.swapaxes(q_point2, -2, -3)]
        k_point = [mnp.swapaxes(k_point0, -2, -3), mnp.swapaxes(k_point1, -2, -3), mnp.swapaxes(k_point2, -2, -3)]
        # squared Euclidean distance between every query point and key point
        dist2 = mnp.square(q_point[0][:, :, None, :] - k_point[0][:, None, :, :]) + \
                mnp.square(q_point[1][:, :, None, :] - k_point[1][:, None, :, :]) + \
                mnp.square(q_point[2][:, :, None, :] - k_point[2][:, None, :, :])
        attn_qk_point = -0.5 * mnp.sum(point_weights[:, None, None, :] * dist2, axis=-1)
        v = mnp.swapaxes(v_scalar, -2, -3)
        q = mnp.swapaxes(self.scalar_weights * q_scalar, -2, -3)
        k = mnp.swapaxes(k_scalar, -2, -3)
        attn_qk_scalar = ops.matmul(q, mnp.swapaxes(k, -2, -1))
        attn_logits = attn_qk_scalar + attn_qk_point
        # bias derived from the pair representation, one channel per head
        attention_2d = self.attention_2d(inputs_2d)
        attention_2d = mnp.transpose(attention_2d, [2, 0, 1])
        attention_2d = self.attention_2d_weights * attention_2d
        attn_logits += attention_2d
        mask_2d = mask * mnp.swapaxes(mask, -1, -2)
        # large negative bias on masked pairs so their softmax weight ~ 0
        attn_logits -= 50 * (1. - mask_2d)
        # attn_logits = attn_logits * mask_2d
        # [num_head, num_query_residues, num_target_residues]
        attn = self.soft_max(attn_logits)
        # [num_head, num_query_residues, num_head * num_scalar_v]
        result_scalar = ops.matmul(attn, v)
        result_point_global = [mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[0][:, None, :, :], axis=-2), -2, -3),
                               mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[1][:, None, :, :], axis=-2), -2, -3),
                               mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[2][:, None, :, :], axis=-2), -2, -3)
                               ]
        result_point_global = [mnp.reshape(result_point_global[0], [num_residues, num_head * num_point_v]),
                               mnp.reshape(result_point_global[1], [num_residues, num_head * num_point_v]),
                               mnp.reshape(result_point_global[2], [num_residues, num_head * num_point_v])]
        result_scalar = mnp.swapaxes(result_scalar, -2, -3)
        result_scalar = mnp.reshape(result_scalar, [num_residues, num_head * num_scalar_v])
        # map aggregated points back into each residue's local frame
        result_point_local = _invert_point(result_point_global, rotation, translation)
        # output features: scalar values, local point coords, point norms,
        # and attention-weighted pair features, all concatenated per residue
        output_feature1 = result_scalar
        output_feature20 = result_point_local[0]
        output_feature21 = result_point_local[1]
        output_feature22 = result_point_local[2]
        output_feature3 = mnp.sqrt(self._dist_epsilon +
                                   mnp.square(result_point_local[0]) +
                                   mnp.square(result_point_local[1]) +
                                   mnp.square(result_point_local[2]))
        result_attention_over_2d = ops.matmul(mnp.swapaxes(attn, 0, 1), inputs_2d)
        num_out = num_head * result_attention_over_2d.shape[-1]
        output_feature4 = mnp.reshape(result_attention_over_2d, [num_residues, num_out])
        final_act = mnp.concatenate([output_feature1, output_feature20, output_feature21,
                                     output_feature22, output_feature3, output_feature4], axis=-1)
        final_result = self.output_projection(final_act)
        return final_result
class MultiRigidSidechain(nn.Cell):
    """Class to make side chain atoms.

    Predicts the seven torsion angles per residue from the structure-module
    activations and converts them into atom14 positions and rigid frames.
    """
    def __init__(self, config, global_config, single_repr_dim):
        super().__init__()
        self.config = config
        self.global_config = global_config
        # two input projections: one for the current act, one for the initial act
        self.input_projection = nn.Dense(single_repr_dim, config.num_channel, weight_init=lecun_init(single_repr_dim)
                                         ).to_float(mstype.float16)
        self.input_projection_1 = nn.Dense(single_repr_dim, config.num_channel, weight_init=lecun_init(single_repr_dim)
                                           ).to_float(mstype.float16)
        self.relu = nn.ReLU()
        # two residual blocks, each: Dense -> ReLU -> Dense (+ skip)
        self.resblock1 = nn.Dense(config.num_channel, config.num_channel,
                                  weight_init=lecun_init(config.num_channel,
                                                         initializer_name='relu')).to_float(mstype.float16)
        self.resblock2 = nn.Dense(config.num_channel, config.num_channel, weight_init='zeros').to_float(
            mstype.float16)  # todo check
        self.resblock1_1 = nn.Dense(config.num_channel, config.num_channel,
                                    weight_init=lecun_init(config.num_channel, initializer_name='relu')
                                    ).to_float(mstype.float16)
        self.resblock2_1 = nn.Dense(config.num_channel, config.num_channel, weight_init='zeros'  # todo check
                                    ).to_float(mstype.float16)
        # 14 outputs = 7 torsion angles x (sin, cos)
        self.unnormalized_angles = nn.Dense(config.num_channel, 14,
                                            weight_init=lecun_init(config.num_channel)).to_float(mstype.float16)
        self.print = ops.Print()
        # constant residue tables from residue_constants, used for frame/atom mapping
        self.restype_atom14_to_rigid_group = Tensor(residue_constants.restype_atom14_to_rigid_group)
        self.restype_atom14_rigid_group_positions = Tensor(residue_constants.restype_atom14_rigid_group_positions)
        self.restype_atom14_mask = Tensor(residue_constants.restype_atom14_mask)
        self.restype_rigid_group_default_frame = Tensor(residue_constants.restype_rigid_group_default_frame)
        # projects each (sin, cos) pair onto the unit circle
        self.l2_normalize = ops.L2Normalize(axis=-1, epsilon=1e-12)
    def construct(self, rotation, translation, act, initial_act, aatype):
        """Predict side chains using rotation and translation representations.
        Args:
            rotation: The rotation matrices.
            translation: A translation matrices.
            act: updated pair activations from structure module
            initial_act: initial act representations (input of structure module)
            aatype: Amino acid type representations
        Returns:
            angles, positions and new frames
        """
        act1 = self.input_projection(self.relu(act.astype(mstype.float32)))
        init_act1 = self.input_projection_1(self.relu(initial_act.astype(mstype.float32)))
        # Sum the activation list (equivalent to concat then Linear).
        act = act1 + init_act1
        # Mapping with some residual blocks.
        # for _ in range(self.config.num_residual_block):
        # resblock1
        old_act = act
        act = self.resblock1(self.relu(act.astype(mstype.float32)))
        act = self.resblock2(self.relu(act.astype(mstype.float32)))
        act += old_act
        # resblock2
        old_act = act
        act = self.resblock1_1(self.relu(act.astype(mstype.float32)))
        act = self.resblock2_1(self.relu(act.astype(mstype.float32)))
        act += old_act
        # Map activations to torsion angles. Shape: (num_res, 14).
        num_res = act.shape[0]
        unnormalized_angles = self.unnormalized_angles(self.relu(act.astype(mstype.float32)))
        unnormalized_angles = mnp.reshape(unnormalized_angles, [num_res, 7, 2])
        unnormalized_angles = ops.Cast()(unnormalized_angles, mstype.float32)
        # normalise each (sin, cos) pair to unit length
        angles = self.l2_normalize(unnormalized_angles)
        # backbone frame flattened as 9 rotation entries followed by 3 translations
        backb_to_global = [rotation[0][0], rotation[0][1], rotation[0][2],
                           rotation[1][0], rotation[1][1], rotation[1][2],
                           rotation[2][0], rotation[2][1], rotation[2][2],
                           translation[0], translation[1], translation[2]]
        all_frames_to_global = torsion_angles_to_frames(aatype, backb_to_global, angles,
                                                        self.restype_rigid_group_default_frame)
        pred_positions = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global,
                                                                       self.restype_atom14_to_rigid_group,
                                                                       self.restype_atom14_rigid_group_positions,
                                                                       self.restype_atom14_mask)
        atom_pos = pred_positions
        frames = all_frames_to_global
        return angles, unnormalized_angles, atom_pos, frames
class FoldIteration(nn.Cell):
    """A single iteration of the main structure module loop."""
    def __init__(self, config, global_config, pair_dim, single_repr_dim):
        super().__init__()
        self.config = config
        self.global_config = global_config
        # keep_prob=0.9 -> 10% dropout after attention and after the transition
        self.drop_out = nn.Dropout(keep_prob=0.9)
        self.attention_layer_norm = nn.LayerNorm([config.num_channel,], epsilon=1e-5)
        self.transition_layer_norm = nn.LayerNorm([config.num_channel,], epsilon=1e-5)
        # three-layer transition MLP (last layer zero-initialised)
        self.transition = nn.Dense(config.num_channel, config.num_channel,
                                   weight_init=lecun_init(config.num_channel, initializer_name='relu')
                                   ).to_float(mstype.float16)
        self.transition_1 = nn.Dense(config.num_channel, config.num_channel,
                                     weight_init=lecun_init(config.num_channel, initializer_name='relu')
                                     ).to_float(mstype.float16)
        self.transition_2 = nn.Dense(config.num_channel, config.num_channel, weight_init='zeros'
                                     ).to_float(mstype.float16)
        self.relu = nn.ReLU()
        # 6 outputs consumed by pre_compose (presumably 3 rotation + 3
        # translation components — confirm against pre_compose)
        self.affine_update = nn.Dense(config.num_channel, 6, weight_init='zeros').to_float(mstype.float16)
        self.attention_module = InvariantPointAttention(self.config, self.global_config, pair_dim)
        self.mu_side_chain = MultiRigidSidechain(config.sidechain, global_config, single_repr_dim)
        self.print = ops.Print()
    def construct(self, act, static_feat_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype):
        """Run one fold iteration: attention, transition, affine update, side chains."""
        attn = self.attention_module(act, static_feat_2d, sequence_mask, rotation, translation)
        act += attn
        act = self.drop_out(act)
        act = self.attention_layer_norm(act.astype(mstype.float32))
        # Transition
        input_act = act
        act = self.transition(act)
        act = self.relu(act.astype(mstype.float32))
        act = self.transition_1(act)
        act = self.relu(act.astype(mstype.float32))
        act = self.transition_2(act)
        act += input_act
        act = self.drop_out(act)
        act = self.transition_layer_norm(act.astype(mstype.float32))
        # This block corresponds to
        # Jumper et al. (2021) Alg. 23 "Backbone update"
        # Affine update
        affine_update = self.affine_update(act)
        quaternion, rotation, translation = pre_compose(quaternion, rotation, translation, affine_update)
        # scale translations by 10 before side-chain prediction
        _, rotation1, translation1 = scale_translation(quaternion, translation, rotation, 10.0)
        angles_sin_cos, unnormalized_angles_sin_cos, atom_pos, frames = \
            self.mu_side_chain(rotation1, translation1, act, initial_act, aatype)
        affine_output = to_tensor_new(quaternion, translation)
        # stop gradients through the returned orientation tensors
        quaternion = F.stop_gradient(quaternion)
        rotation = F.stop_gradient(rotation)
        return act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
               atom_pos, frames
class StructureModule(nn.Cell):
"""StructureModule as a network head."""
def __init__(self, config, single_repr_dim, pair_dim, global_config=None, compute_loss=True):
super(StructureModule, self).__init__()
self.config = config
self.global_config = global_config
self.compute_loss = compute_loss
self.fold_iteration = FoldIteration(self.config, global_config, pair_dim, single_repr_dim)
self.single_layer_norm = nn.LayerNorm([single_repr_dim,], epsilon=1e-5)
self.initial_projection = nn.Dense(single_repr_dim, self.config.num_channel,
weight_init=lecun_init(single_repr_dim)).to_float(mstype.float16)
self.pair_layer_norm = nn.LayerNorm([pair_dim,], epsilon=1e-5)
self.num_layer = config.num_layer
self.indice0 = Tensor(
np.arange(global_config.seq_length).reshape((-1, 1, 1)).repeat(37, axis=1).astype("int32"))
self.traj_w = Tensor( | np.array([1.] * 4 + [config.position_scale] * 3) | numpy.array |
""" Use simple subclassing example from numpy docs:
https://docs.scipy.org/doc/numpy-1.14.0/user/basics.subclassing.html
"""
import numpy as np
class RealisticInfoArray(np.ndarray):
def __new__(cls, input_array, info=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = | np.asarray(input_array) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
# @file name : model_trainer.py
# @author : JLChen
# @date : 2020-02-29
# @brief : 模型训练类
"""
import torch
import numpy as np
from collections import Counter
from tools.evalution_segmentaion import eval_semantic_segmentation
from torch.nn.utils import clip_grad_value_
class ModelTrainer(object):
    @staticmethod
    def train(data_loader, model, loss_f, cfg, optimizer, epoch_idx, logger):
        """Run one training epoch and return aggregate metrics.

        Args:
            data_loader: training loader; its dataset must expose ``cls_num``.
            model: segmentation network being optimised.
            loss_f: loss function applied to (outputs, labels) on CPU.
            cfg: config object (device, is_clip, clip_value, hist_grad,
                log_interval, max_epoch).
            optimizer: optimiser stepping the model's parameters.
            epoch_idx: current epoch index (used for logging only).
            logger: logger written to every ``cfg.log_interval`` iterations.

        Returns:
            tuple: (mean loss, mean class accuracy, summed confusion matrix,
            mean IoU, per-iteration max-abs-gradient list — empty unless
            ``cfg.hist_grad`` is set).
        """
        model.train()
        class_num = data_loader.dataset.cls_num
        conf_mat = np.zeros((class_num, class_num))
        loss_sigma = []
        train_acc = []
        train_miou = []
        grad_lst_iter = []
        for i, data in enumerate(data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(cfg.device), labels.to(cfg.device)
            # forward & backward
            outputs = model(inputs)
            optimizer.zero_grad()
            loss = loss_f(outputs.cpu(), labels.cpu())
            loss.backward()
            if cfg.is_clip:
                # clamp gradient values elementwise to +/- cfg.clip_value
                clip_grad_value_(model.parameters(), cfg.clip_value)
            if cfg.hist_grad:
                # record the largest absolute gradient of this iteration
                grad_lst_tmp = []
                for p in model.parameters():
                    grad_lst_tmp.append(torch.max(p.grad.abs()).cpu().numpy())
                grad_lst_iter.append(max(grad_lst_tmp).flatten()[0])
            optimizer.step()
            # evaluation
            pre_label = outputs.max(dim=1)[1].data.cpu().numpy()  # (bs, 360, 480)
            pre_label = [i for i in pre_label]  # one entry per sample; pre_label[0].shape = (360, 480)
            true_label = labels.data.cpu().numpy()
            true_label = [i for i in true_label]  # true_label[0].shape (360, 480)
            eval_metrix = eval_semantic_segmentation(pre_label, true_label, class_num)
            train_acc.append(eval_metrix['mean_class_accuracy'])
            train_miou.append(eval_metrix['miou'])
            conf_mat += eval_metrix["conf_mat"]
            loss_sigma.append(loss.item())
            # log training info every log_interval iterations
            if i % cfg.log_interval == cfg.log_interval - 1:
                logger.info('|Epoch[{}/{}]||batch[{}/{}]|batch_loss: {:.4f}||mIoU {:.4f}|'.format(
                    epoch_idx, cfg.max_epoch, i + 1, len(data_loader), loss.item(), eval_metrix['miou']))
        loss_mean = np.mean(loss_sigma)
        acc_mean = np.mean(train_acc)
        miou_mean = np.mean(train_miou)
        return loss_mean, acc_mean, conf_mat, miou_mean, grad_lst_iter
@staticmethod
def valid(data_loader, model, loss_f, cfg):
model.eval()
class_num = data_loader.dataset.cls_num
conf_mat = np.zeros((class_num, class_num))
loss_sigma = []
valid_acc = []
valid_miou = []
for i, data in enumerate(data_loader):
inputs, labels = data
inputs, labels = inputs.to(cfg.device), labels.to(cfg.device)
outputs = model(inputs)
loss = loss_f(outputs.cpu(), labels.cpu())
# 统计loss
loss_sigma.append(loss.item())
# 评估
pre_label = outputs.max(dim=1)[1].data.cpu().numpy() # (bs, 360, 480)
pre_label = [i for i in pre_label] # 一个元素是一个样本的预测。pre_label[0].shape = (360,480)
true_label = labels.data.cpu().numpy()
true_label = [i for i in true_label] # true_label[0].shape (360, 480)
eval_metrix = eval_semantic_segmentation(pre_label, true_label, class_num)
valid_acc.append(eval_metrix['mean_class_accuracy'])
valid_miou.append(eval_metrix['miou'])
conf_mat += eval_metrix["conf_mat"]
loss_sigma.append(loss.item())
loss_mean = np.mean(loss_sigma)
acc_mean = np.mean(valid_acc)
miou_mean = | np.mean(valid_miou) | numpy.mean |
import os
import pdb # noqa:F401
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from src.interpolate import get_interpolation_grid_regular
from src.interpolate import get_interpolation_grid_sparse
from src.interpolate import interpolate_locally_step
from src.interpolate import interpolate_locally_batch
# from src.functions_to_approximate import zhou_readable
#########################################################################
# FIXTURES
#########################################################################
@pytest.fixture
def setup_get_interpolation_grid():
out = {}
out["dims"] = 2
out["grid_min"] = np.array(object=[1.0, 1.0], dtype=float)
out["grid_max"] = | np.array(object=[9.0, 9.0], dtype=float) | numpy.array |
#
# Copyright © 2018 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
import time
import numpy as np
import sunpy.map
import sunpy.io
import json
import astropy.units as u
import pandas as pd
from scipy import stats
from scipy.spatial import ConvexHull
from astropy.utils.data import download_file
import urllib
from datetime import datetime, timedelta
import requests
from pyquaternion import Quaternion
# quantity of polygons (resolution of the parameter grid below)
n = 21
# domain definition: 1-D parameter vectors sweeping out the lemniscate surface
theta = np.linspace(0, np.pi / 2, n)
phi = np.linspace(0, 2 * np.pi, n)
# expand the two 1-D vectors into matching (n, n) 2-D parameter grids
theta, phi = np.meshgrid(theta, phi)
# Constant for aspect ratio of lemniscate silhouette (reference scale of 1 AU)
AU_REFERENCE_CUBE = 1
# constant for domain and grid inits (half-width of the plotting grid)
GRID_HALF_WIDTH = 800
# function takes care of updating all of the points for the different plots
def plot_update(radial, angular, long, lat):
# data calculation section for width and distance interaction with figure
# scalars of the lemniscate
# c3 is not stored because it is always 1
lem_distance_c_straight_pixel = (radial * u.solRad).to(u.km) * (
GRID_HALF_WIDTH / (AU_REFERENCE_CUBE * u.AU).to(u.km))
c_one = lem_distance_c_straight_pixel
c_two = c_one * np.tan(((angular / 2) * u.deg))
x_mod = c_one * np.cos(theta)
y_mod = c_two * | np.cos(theta) | numpy.cos |
# !/usr/bin/env python
# Created by "Thieu" at 15:05, 03/06/2021 ----------%
# Email: <EMAIL> %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import numpy as np
from math import gamma
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseSLO(Optimizer):
    """
    The original version of: Sea Lion Optimization Algorithm (SLO)
    Links:
        1. https://www.researchgate.net/publication/333516932_Sea_Lion_Optimization_Algorithm
        2. https://doi.org/10.14569/IJACSA.2019.0100548
    Notes
    ~~~~~
    + The original paper is unclear in some equations and parameters
    + This version is based on my expertise
    Examples
    ~~~~~~~~
    >>> import numpy as np
    >>> from mealpy.swarm_based.SLO import BaseSLO
    >>>
    >>> def fitness_function(solution):
    >>>     return np.sum(solution**2)
    >>>
    >>> problem_dict1 = {
    >>>     "fit_func": fitness_function,
    >>>     "lb": [-10, -15, -4, -2, -8],
    >>>     "ub": [10, 15, 12, 8, 20],
    >>>     "minmax": "min",
    >>> }
    >>>
    >>> epoch = 1000
    >>> pop_size = 50
    >>> model = BaseSLO(problem_dict1, epoch, pop_size)
    >>> best_position, best_fitness = model.solve()
    >>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
    References
    ~~~~~~~~~~
    [1] <NAME>., <NAME>. and <NAME>., 2019. Sea lion optimization algorithm. Sea, 10(5), p.388.
    """
    def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
        """
        Args:
            problem (dict): The problem dictionary
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
        """
        super().__init__(problem, kwargs)
        # hyper-parameters are validated against their allowed integer ranges
        self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
        self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
        # number of fitness evaluations per epoch equals the population size
        self.nfe_per_epoch = self.pop_size
        self.sort_flag = False
    def amend_position(self, position=None, lb=None, ub=None):
        """
        Depend on what kind of problem are we trying to solve, there will be an different amend_position
        function to rebound the position of agent into the valid range.
        Args:
            position: vector position (location) of the solution.
            lb: list of lower bound values
            ub: list of upper bound values
        Returns:
            Amended position (make the position is in bound)
        """
        # keep in-bound components; resample out-of-bound components uniformly in [lb, ub)
        return np.where(np.logical_and(lb <= position, position <= ub), position, np.random.uniform(lb, ub))
    def evolve(self, epoch):
        """
        The main operations (equations) of algorithm. Inherit from Optimizer class
        Args:
            epoch (int): The current iteration
        """
        # c decays linearly from 2 to 0 over the run (encircling factor)
        c = 2 - 2 * epoch / self.epoch
        t0 = np.random.rand()
        v1 = np.sin(2 * np.pi * t0)
        v2 = np.sin(2 * np.pi * (1 - t0))
        # NOTE(review): v2 can be arbitrarily close to 0 (t0 near 0.5),
        # making SP_leader very large -- confirm this matches the intent
        SP_leader = np.abs(v1 * (1 + v2) / v2)  # In the paper this is not clear how to calculate
        pop_new = []
        for idx in range(0, self.pop_size):
            if SP_leader < 0.25:
                if c < 1:
                    # exploitation: move toward the global best
                    pos_new = self.g_best[self.ID_POS] - c * np.abs(2 * np.random.rand() *
                                                                    self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
                else:
                    # exploration: move relative to a randomly chosen other agent
                    ri = np.random.choice(list(set(range(0, self.pop_size)) - {idx}))  # random index
                    pos_new = self.pop[ri][self.ID_POS] - c * np.abs(2 * np.random.rand() *
                                                                     self.pop[ri][self.ID_POS] - self.pop[idx][self.ID_POS])
            else:
                # spiral-like update around the global best
                pos_new = np.abs(self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]) * \
                          np.cos(2 * np.pi * np.random.uniform(-1, 1)) + self.g_best[self.ID_POS]
            # In the paper doesn't check also doesn't update old solution at this point
            pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
            pop_new.append([pos_new, None])
        pop_new = self.update_target_wrapper_population(pop_new)
        # keep the better of (old agent, candidate) for each slot
        self.pop = self.greedy_selection_population(self.pop, pop_new)
class ModifiedSLO(Optimizer):
"""
My modified version of: Sea Lion Optimization (M-SLO)
Notes
~~~~~
+ Use the idea of shrink encircling combine with levy flight techniques
+ Beside, the idea of local best in PSO is used
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.swarm_based.SLO import ModifiedSLO
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> model = ModifiedSLO(problem_dict1, epoch, pop_size)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
"""
ID_LOC_POS = 2
ID_LOC_FIT = 3
    def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
        """
        Args:
            problem (dict): The problem dictionary
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
        """
        super().__init__(problem, kwargs)
        # hyper-parameters are validated against their allowed integer ranges
        self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
        self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
        # number of fitness evaluations per epoch equals the population size
        self.nfe_per_epoch = self.pop_size
        self.sort_flag = False
    def create_solution(self, lb=None, ub=None):
        """
        To get the position, fitness wrapper, target and obj list
            + A[self.ID_POS] --> Return: position
            + A[self.ID_TAR] --> Return: [target, [obj1, obj2, ...]]
            + A[self.ID_TAR][self.ID_FIT] --> Return: target
            + A[self.ID_TAR][self.ID_OBJ] --> Return: [obj1, obj2, ...]
        Returns:
            list: wrapper of solution with format [position, [target, [obj1, obj2, ...]], local_pos, local_fit]
        """
        ## Increase exploration at the first initial population using opposition-based learning.
        position = self.generate_position(lb, ub)
        position = self.amend_position(position, lb, ub)
        target = self.get_target_wrapper(position)
        # opposite point: mirror the position across the midpoint of the search box
        local_pos = lb + ub - position
        local_pos = self.amend_position(local_pos, lb, ub)
        local_target = self.get_target_wrapper(local_pos)
        # NOTE(review): compare_agent presumably returns True when its first
        # argument is the better wrapper; in that branch the opposite point is
        # used as the agent and the original becomes its local memory -- verify
        # against the Optimizer base class
        if self.compare_agent([None, target], [None, local_target]):
            return [local_pos, local_target, position, target]
        else:
            return [position, target, local_pos, local_target]
def _shrink_encircling_levy__(self, current_pos, epoch, dist, c, beta=1):
up = gamma(1 + beta) * np.sin(np.pi * beta / 2)
down = (gamma((1 + beta) / 2) * beta * np.power(2, (beta - 1) / 2))
xich_ma_1 = np.power(up / down, 1 / beta)
xich_ma_2 = 1
a = np.random.normal(0, xich_ma_1, 1)
b = np.random.normal(0, xich_ma_2, 1)
LB = 0.01 * a / (np.power(np.abs(b), 1 / beta)) * dist * c
D = np.random.uniform(self.problem.lb, self.problem.ub)
levy = LB * D
return (current_pos - np.sqrt(epoch + 1) * np.sign(np.random.random(1) - 0.5)) * levy
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
c = 2 - 2 * epoch / self.epoch
if c > 1:
pa = 0.3 # At the beginning of the process, the probability for shrinking encircling is small
else:
pa = 0.7 # But at the end of the process, it become larger. Because sea lion are shrinking encircling prey
SP_leader = np.random.uniform(0, 1)
pop_new = []
for idx in range(0, self.pop_size):
agent = deepcopy(self.pop[idx])
if SP_leader >= 0.6:
pos_new = np.cos(2 * np.pi * np.random.normal(0, 1)) * \
np.abs(self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]) + self.g_best[self.ID_POS]
else:
if np.random.uniform() < pa:
dist1 = | np.random.uniform() | numpy.random.uniform |
__author__ = '<NAME>'
from qparallel.clustering.base import Model
import numpy as np
from pathos.multiprocessing import ProcessPool as Pool
from qparallel.helpers import (
split_data
)
class KMeans(Model):
"""
Currently supports only 2d arrays
"""
def __init__(self, k, cpu_count=-1, distance_metric='euclidean',
max_iter=300, n_iter=10, tol=1e-4, verbose=0):
"""
:param k: number of clusters
:param n_proc: number of cpu
:param distance_metric: optional, only euclidean is implementedd
:param max_iter: optional, maximum number of iterations
:param n_iter: optional, number of iterations
:param tol: optional, tolerance value
:param verbose: optional, print information
"""
super(KMeans, self).__init__(cpu_count=cpu_count)
self.k = k
self.distance_metric = distance_metric
self.max_iter = max_iter
self.n_iter = n_iter
self.tol = 1e-4
self.verbose = verbose
def _check_x(self, x):
# restrict number of dims if needed
if isinstance(x, list):
return np.asarray(x)
elif isinstance(x, np.ndarray):
return x
else:
raise TypeError
def _clusters_init(self, x):
# change to get centers from chunks
inx = np.random.choice(range(x.shape[0]), self.k, replace=False)
self.cluster_centers = x[inx]
def _proc_run(self, chunk):
chunk = np.array(chunk)
C = np.array([np.argmin([np.dot(x_i - y_k, x_i - y_k) for y_k in self.cluster_centers]) for x_i in chunk])
centroids = []
for i in range(self.k):
if chunk[C == i].size == 0:
centroids.append([np.nan, np.nan])
else:
centroids += [chunk[C == i].mean(axis=0).tolist()]
return centroids
def fit(self, data):
"""
:param data: np.array
:return:
example:
>>> array = np.array([[2, 3], [9, 8], [4, 9], [5, 2]])
>>> model = KMeans(2,4)
>>> model.fit(array)
>>> print(model.labels)
"""
x = data
self._check_x(x)
self.random_state = None
chunks = split_data(x[x[:, 0].argsort()], 2)
self._clusters_init(x)
for i in range(self.n_iter):
with Pool(self.n_proc) as pool:
arrays = pool.map(self._proc_run, chunks)
arrays = np.array(arrays)
self.cluster_centers = np.array( | np.nanmean(arrays, axis=0) | numpy.nanmean |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/'  # base M2M URL
SENSOR_URL = '12576/sensor/inv/'  # Sensor Information
# setup access credentials
# NOTE(review): the token below looks redacted/placeholder ('<KEY>') -- supply
# real OOI API credentials before use, and avoid committing them to source
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
    """Request a NetCDF data set from the OOI M2M API and poll until it is staged.

    :param uframe_dataset_name: dataset path appended to the sensor inventory URL
    :param start_date: begin datetime string for the request window
    :param end_date: end datetime string for the request window
    :return: JSON response describing the staged request, or None if the
        initial request was rejected
    """
    options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
    r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
    if r.status_code == requests.codes.ok:
        data = r.json()
    else:
        return None
    # wait until the request is completed (poll status.txt every 3 s, up to 400 tries)
    print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
    url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
    check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # bug fix: compute elapsed before the success check -- previously it
            # was only assigned after a sleep, so a request that completed on the
            # very first poll raised NameError on the print below
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
    return data
def M2M_Files(data, tag=''):
    """
    Collect the data-file list from the THREDDS catalog of an M2M request.

    :param data: JSON object returned from the M2M data request with details on
        where the data is to be found for download
    :param tag: regex tag used to discriminate the data files, so we only
        collect the correct ones
    :return: list of matching files in the catalog
    """
    # the request response carries several URLs; pick the THREDDS catalog one
    thredds_urls = [u for u in data['allURLs'] if re.match(r'.*thredds.*', u)]
    catalog_url = thredds_urls[0]
    return list_files(catalog_url, tag)
def list_files(url, tag=''):
    """
    List the NetCDF data files in the THREDDS catalog created by an M2M request.

    :param url: URL to the user's THREDDS catalog for a specific data request
    :param tag: regex pattern used to distinguish files of interest
    :return: list of files in the catalog with paths relative to the catalog
    """
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html.parser')
    regex = re.compile(tag)
    anchors = soup.find_all('a', text=regex)
    return [anchor.get('href') for anchor in anchors]
def M2M_Data(nclist, variables):
    """Download the requested variables from each THREDDS/OPeNDAP endpoint.

    :param nclist: list of catalog-relative NetCDF URLs (see ``M2M_Files``)
    :param variables: structtype of ``var`` entries; each entry's ``data``
        array is appended to in place
    :return: (variables, pandas DatetimeIndex built from the first variable,
        which is assumed to hold seconds since 1900-01-01)
    """
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist is going to contain more than one url eventually
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        try:
            for ii in range(len(variables)):
                dum = openFile.variables[variables[ii].name]
                variables[ii].data = np.append(variables[ii].data, dum[:].data)
        finally:
            # bug fix: the dataset handle was never closed, leaking one open
            # connection per catalog URL
            openFile.close()
    # seconds -> days, then convert with a 1900-01-01 epoch
    tmp = variables[0].data/60/60/24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
class var(object):
    """Generic holder pairing a variable ``name`` with its ``data`` and ``units``."""
    def __init__(self):
        # empty defaults; callers fill these attributes in after construction
        self.name = ''
        self.data = np.array([])
        self.units = ''

    def __repr__(self):
        lines = [
            "name: " + self.name,
            "units: " + self.units,
            "data: size: " + str(self.data.shape),
        ]
        return "\n".join(lines)
class structtype(object):
    """A minimal imitation of a Matlab struct array.

    Indexing position ``len(self)`` (exactly one past the end) transparently
    appends a fresh empty ``var`` slot, so callers can fill entries
    sequentially; any larger index raises ``IndexError``.
    """
    def __init__(self):
        self._data = []

    def __getitem__(self, index):
        """Return slot ``index``, creating it when it is the first unused one."""
        needs_new_slot = index == len(self._data)
        if needs_new_slot:
            self._data.append(var())
        return self._data[index]

    def __len__(self):
        return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = | np.array([]) | numpy.array |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""servable config for pangu alpha"""
import os
import time
from easydict import EasyDict
import numpy as np
from mindspore_serving.server import register
from mindspore_serving.server import distributed
from pangu.tokenization_jieba import JIEBATokenizer
# Tokenizer assets are expected next to this file under ./tokenizer.
cur_dir = os.path.abspath(os.path.dirname(__file__))
tokenizer_path = os.path.join(cur_dir, "tokenizer")
tokenizer = JIEBATokenizer(os.path.join(tokenizer_path, "vocab.vocab"), os.path.join(tokenizer_path, "vocab.model"))
# End-of-text token id; used to stop generation.
end_token = tokenizer.eot_id
# Inference hyper-parameters for text generation.
config = EasyDict({
    'frequency_penalty': 1.5,
    'presence_penalty': 0.3,
    'max_generate_length': 500,  # maximum number of tokens to generate
    'top_k_num': 3,
    'top_p': 1.0,
    'end_token': 9,
    'seq_length': 1024,
    'vocab_size': 40000,
})
def topk_fun(logits, topk=5):
    """Return the top-``topk`` values and indices of the first row of *logits*.

    Args:
        logits: 2D array-like of scores; only row 0 is examined.
        topk: number of largest entries to return.

    Returns:
        Tuple ``(value, index)``: two arrays of shape ``(1, topk)`` holding the
        top values (descending) and their column indices. Ties keep ascending
        index order, matching the stable Python sort the original used.
    """
    row = np.asarray(logits[0], dtype=np.float64)
    # Stable argsort on the negated row: descending by value, and equal values
    # keep ascending index order. Avoids building/sorting a Python list of the
    # whole vocabulary.
    order = np.argsort(-row, kind='stable')[:topk]
    value = row[order][np.newaxis, :]
    index = order[np.newaxis, :]
    return value, index
# Declare an 8-rank, single-stage distributed servable; inputs carry no batch dim.
distributed.declare_servable(rank_size=8, stage_size=1, with_batch_dim=False)
@register.register_method(output_names=["logits"])
def predict_sub0(input_ids, current_index, init, batch_valid_length):
    """Run inference sub-graph 0 of the servable and return its logits."""
    return register.call_servable(input_ids, current_index, init, batch_valid_length, subgraph=0)
@register.register_method(output_names=["logits"])
def predict_sub1(input_id, current_index, init, batch_valid_length):
    """Run inference sub-graph 1 of the servable and return its logits."""
    return register.call_servable(input_id, current_index, init, batch_valid_length, subgraph=1)
# Pipeline handles wrapping the two registered sub-graph methods of the
# "pangu" servable.
sub0_servable = register.PipelineServable(servable_name="pangu", method="predict_sub0")
sub1_servable = register.PipelineServable(servable_name="pangu", method="predict_sub1")
@register.register_pipeline(output_names=["output_sentence"])
def predict(input_sentence):
    """Tokenize *input_sentence*, run incremental generation, and return the
    decoded reply. Logs request, reply and wall-clock latency."""
    print(f"----------------------------- begin {input_sentence} ---------", flush=True)
    start = time.time()
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(input_sentence))
    generated = generate_increment(ids)
    reply = "".join(tokenizer.convert_ids_to_tokens(generated))
    print(f"time cost {(time.time() - start) * 1000}ms, request '{input_sentence}' get reply '{reply}'",
          flush=True)
    return reply
def generate_increment(origin_inputs):
"""
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text
"""
# Get configurations for inference
frequency_penalty = config.frequency_penalty
presence_penalty = config.presence_penalty
top_p = config.top_p
top_k_num = config.top_k_num
max_generate_length = config.max_generate_length
seq_length = config.seq_length
vocab_size = config.vocab_size
# Init outputs with original inputs
outputs = origin_inputs
origin_inputs = | np.array([origin_inputs]) | numpy.array |
# ===== IMPORTS =====
# === Standard library ===
import logging
import pathlib
import json
# === Thirdparty ===
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import torch
import torch.utils.data as tdata
import torch.nn.functional as tfunctional
import torch.nn.utils.rnn as tutilsrnn
from sklearn.metrics import precision_recall_fscore_support
# === Local ===
import ml4logs
# ===== GLOBALS =====
logger = logging.getLogger(__name__)
# Label convention used throughout: 0 = normal block, 1 = anomalous block.
NORMAL_LABEL = 0
ABNORMAL_LABEL = 1
# ===== CLASSES =====
class SequenceDataset(tdata.Dataset):
    """Torch dataset yielding ``(input, output, label)`` triples by index."""

    def __init__(self, inputs, outputs, labels):
        self._inputs, self._outputs, self._labels = inputs, outputs, labels

    def __getitem__(self, idx):
        return (self._inputs[idx], self._outputs[idx], self._labels[idx])

    def __len__(self):
        return len(self._inputs)
class Seq2SeqModel(torch.nn.Module):
    """LSTM encoder followed by an MLP head, mapping a packed batch of feature
    sequences to per-timestep reconstructions of the same dimensionality.

    Args:
        f_dim: feature dimension of each timestep (LSTM input and output size).
        n_lstm_layers: number of stacked LSTM layers.
        n_hidden_linears: number of hidden Linear layers in the MLP head.
        linear_width: width of the hidden Linear layers.
        linear_norm: if True, insert LayerNorm after each hidden Linear.
    """

    def __init__(self, f_dim, n_lstm_layers=1,
                 n_hidden_linears=2, linear_width=300, linear_norm=False):
        super().__init__()
        self._lstm = torch.nn.LSTM(f_dim, f_dim, batch_first=True,
                                   num_layers=n_lstm_layers)
        # MLP head: f_dim -> linear_width -> ... -> f_dim, LeakyReLU between
        # layers, optional LayerNorm after each hidden Linear.
        linears = [torch.nn.Linear(f_dim, linear_width)]
        if linear_norm:
            linears.append(torch.nn.LayerNorm(linear_width))
        linears.append(torch.nn.LeakyReLU())
        for _ in range(n_hidden_linears):
            linears.append(torch.nn.Linear(linear_width, linear_width))
            if linear_norm:
                linears.append(torch.nn.LayerNorm(linear_width))
            linears.append(torch.nn.LeakyReLU())
        linears.append(torch.nn.Linear(linear_width, f_dim))
        self._linears = torch.nn.Sequential(*linears)

    def forward(self, X):
        """Run the packed batch *X* through the LSTM, pad, and apply the MLP.

        Args:
            X: a ``PackedSequence`` of shape-(batch, seq, f_dim) data.

        Returns:
            Padded tensor of shape ``(batch, max_len, f_dim)``.
        """
        out, _ = self._lstm(X)
        # BUG FIX: pad the LSTM *output*, not the raw input X — the original
        # code unpacked X here, silently discarding the LSTM's result.
        out, _ = tutilsrnn.pad_packed_sequence(out, batch_first=True)
        return self._linears(out)
class Seq2SeqModelTrainer:
def __init__(self, device, f_dim, model_kwargs,
optim_kwargs, lr_scheduler_kwargs):
self._model = Seq2SeqModel(f_dim, **model_kwargs).to(device)
self._criterion = torch.nn.MSELoss()
self._optimizer = torch.optim.Adam(
self._model.parameters(), **optim_kwargs)
self._scheduler = torch.optim.lr_scheduler.ExponentialLR(
self._optimizer, **lr_scheduler_kwargs)
self._device = device
self._threshold = 0.0
def train(self, dataloader):
self._model.train()
train_loss = 0.0
for inputs, outputs, _ in dataloader:
results, outputs = self._forward(inputs, outputs)
loss = self._criterion(results, outputs)
train_loss += loss.item()
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
self._scheduler.step()
return train_loss / len(dataloader)
def evaluate(self, dataloader):
self._model.eval()
total_loss = 0.0
with torch.no_grad():
for inputs, outputs, _ in dataloader:
results, outputs = self._forward(inputs, outputs)
loss = self._criterion(results, outputs)
total_loss += loss.item()
return total_loss / len(dataloader)
def compute_threshold(self, dataloader):
self._model.eval()
errors = []
for inputs, outputs, _ in dataloader:
results, outputs = self._forward(inputs, outputs)
loss = tfunctional.mse_loss(results, outputs, reduction='none')
loss = torch.mean(loss, dim=2)
loss = torch.mean(loss, dim=1)
errors.extend(loss.tolist())
self._threshold = np.mean(errors) + 2 * np.std(errors)
return self._threshold
def test(self, dataloader, threshold=None):
if threshold is None:
threshold = self._threshold
self._model.eval()
labels = []
result_labels = []
with torch.no_grad():
for inputs, outputs, labels_ in dataloader:
results, outputs = self._forward(inputs, outputs)
loss = tfunctional.mse_loss(results, outputs, reduction='none')
loss = torch.mean(loss, dim=2)
loss = torch.mean(loss, dim=1)
result_labels_ = torch.where(
loss > threshold, ABNORMAL_LABEL, NORMAL_LABEL)
labels.append(labels_.numpy())
result_labels.append(result_labels_.to(device='cpu').numpy())
labels = np.concatenate(labels)
results = np.concatenate(result_labels)
precision, recall, f1, _ = precision_recall_fscore_support(
labels, results, average='binary', zero_division=0
)
return {
'threshold': threshold,
'precision': precision,
'recall': recall,
'f1': f1
}
    def threshold(self):
        """Return the stored anomaly threshold (0.0 until compute_threshold runs)."""
        return self._threshold
def _forward(self, inputs, outputs):
inputs = inputs.to(self._device)
outputs = outputs.to(self._device)
outputs, lengths = tutilsrnn.pad_packed_sequence(
outputs,
batch_first=True
)
results = self._model(inputs)
results = tutilsrnn.pack_padded_sequence(
results,
lengths,
batch_first=True,
enforce_sorted=False
)
results, _ = tutilsrnn.pad_packed_sequence(
results,
batch_first=True
)
return results, outputs
# ===== FUNCTIONS =====
def train_test_seq2seq(args):
np.random.seed(args['seed'])
if args['device'] == 'cuda' and not torch.cuda.is_available():
logger.warning("CUDA not available, falling back to CPU!")
args['device'] = 'cpu'
torch.manual_seed(args['seed'])
features_path = pathlib.Path(args['features_path'])
blocks_path = pathlib.Path(args['blocks_path'])
labels_path = pathlib.Path(args['labels_path'])
stats_path = pathlib.Path(args['stats_path'])
ml4logs.utils.mkdirs(files=[stats_path])
features = np.load(features_path).astype(np.float32)
blocks = np.load(blocks_path)
labels = np.load(labels_path)
logger.info('Loaded features %s', features.shape)
logger.info('Loaded blocks %s', blocks.shape)
logger.info('Loaded labels %s', labels.shape)
blocks_unique, blocks_counts = np.unique(blocks, return_counts=True)
blocks_used = blocks_unique[blocks_counts > 1]
logger.info('Blocks used %s', blocks_used.shape)
normal_blocks = blocks_used[labels[blocks_used] == NORMAL_LABEL]
test_abnormal_blocks = blocks_used[labels[blocks_used] == ABNORMAL_LABEL]
logger.info('Normal blocks %s, abnormal blocks %s',
normal_blocks.shape, test_abnormal_blocks.shape)
logger.info('Split with train size = %.2f', args['train_size'])
train_blocks, test_normal_blocks = train_test_split(
normal_blocks,
train_size=args['train_size']
)
logger.info('Split with validation size = %.2f', args['validation_size'])
test_blocks = np.concatenate((test_normal_blocks, test_abnormal_blocks))
train_blocks, validation_blocks = train_test_split(
train_blocks,
test_size=args['validation_size']
)
logger.info('Train normal blocks %s', train_blocks.shape)
logger.info('Validation normal blocks %s', validation_blocks.shape)
logger.info('Test normal blocks %s', test_normal_blocks.shape)
logger.info('Test abnormal blocks %s', test_abnormal_blocks.shape)
scaler = StandardScaler()
logger.info('Fit StandardScaler with train blocks')
scaler.fit(features[ | np.isin(blocks, train_blocks) | numpy.isin |
"""Methods for working with data and numpy arrays"""
import warnings
import numpy as np
def arr_sample(arr, rate):
    """Return an array linearly sampled from the input array at the given rate.

    Examples
    --------
    * [1, 2, 3, 4] and rate 2 -> [1, 3]
    * [1, 2, 3, 4] and rate 0.5 -> [1, 1, 2, 2, 3, 3, 4, 4]
    """
    if arr.ndim != 1:
        raise ValueError("Only 1d arrays can be sampled from.")
    out = []
    i = 0.0
    while i < arr.shape[0]:
        # int() truncates toward zero, which equals floor for non-negative i.
        # Fix: the original used np.int, which was removed in NumPy 1.24.
        out.append(arr[int(i)])
        i += rate
    return np.array(out)
def factors(x):
    """Return the factors of x.

    Parameters
    ----------
    x : int
        The number to factorize. Must be a non-zero integer

    Returns
    -------
    factors : set
        The set of factors for x
    """
    if x == 0 or x % 1 != 0:
        raise ValueError("Factors can only be found with non-zero integers")
    if x < 0:
        x = np.abs(x)
        warnings.warn("Only positive factors will be returned, but negative numbers have a positive and negative factor for each.", UserWarning)
    result = {1, int(x)}
    # Fix: use an exact modulo test. The original float-division comparison
    # ((x / float(i)) == int(x / i)) loses precision for |x| > 2**53.
    for i in range(2, int(np.sqrt(x) + 1)):
        if x % i == 0:
            result.add(int(i))
            result.add(int(x // i))
    return result
def flip_dict(dict, unique_items=False, force_list_values=False):
    """Swap keys and values in a dictionary

    Parameters
    ----------
    dict: dictionary
        dictionary object to flip
    unique_items: bool
        whether to assume that all items in dict are unique, potential speedup but repeated items will be lost
    force_list_values: bool
        whether to force all items in the result to be lists or to let unique items have unwrapped values. Doesn't apply if unique_items is true.
    """
    if unique_items:
        # One-to-one assumption: duplicate values silently collapse.
        return {value: key for key, value in dict.items()}
    if force_list_values:
        flipped = {}
        for key, value in dict.items():
            flipped.setdefault(value, []).append(key)
        return flipped
    # Mixed mode: single keys stay unwrapped, duplicates become lists.
    flipped = {}
    for key, value in dict.items():
        if value not in flipped:
            flipped[value] = key
        elif isinstance(flipped[value], list):
            flipped[value].append(key)
        else:
            flipped[value] = [flipped[value], key]
    return flipped
def num_digits(x):
    """Return the number of decimal digits in x (sign ignored; 0 has one digit)."""
    if x == 0:
        return 1
    magnitude = np.log10(np.abs(x) + 1)
    return int(np.ceil(magnitude))
def prime_factors(x):
    """Return the prime factorization of x.

    Parameters
    ----------
    x : int
        The number to factorize. Must be a non-zero integer

    Returns
    -------
    prime_factors : list
        The list of prime factors. Repeated factors will occur multiple times in the list.
    """
    if x == 0 or x % 1 != 0:
        raise ValueError("Factors can only be found with non-zero integers")
    if x < 0:
        x = np.abs(x)
        warnings.warn("Only positive factors will be returned, but negative numbers have a positive and negative factor for each.", UserWarning)
    # Work stack of composite candidates; primes drop out into the result.
    factors = [x]
    primes = []
    while factors:
        candidate = factors.pop()
        # Fix (consistent with factors()): exact modulo test instead of the
        # float-division comparison, which breaks for values beyond 2**53.
        for i in range(2, int(np.sqrt(candidate)) + 1):
            if candidate % i == 0:
                factors.extend([i, int(candidate // i)])
                break
        else:
            # No divisor found: candidate is prime.
            primes.append(candidate)
    return sorted(primes)
def prime_overlap(x, y):
    """Return the prime factors x and y have in common.

    Parameters
    ----------
    x : int
        The first number to factorize
    y: int
        The second number to factorize

    Returns
    -------
    overlap : list
        The list of common factors. Repeated factors are included for the number of common repeats.
    """
    remaining_x = prime_factors(x)
    remaining_y = prime_factors(y)
    shared = []
    # Pop factors of x one at a time; each match consumes one factor of y,
    # so repeated primes are matched with multiplicity.
    while remaining_x and remaining_y:
        candidate = remaining_x.pop()
        if candidate in remaining_y:
            shared.append(candidate)
            remaining_y.remove(candidate)
    return sorted(shared)
def rescale(data, new_min=0, new_max=1, axis=None):
    """Rescales data to have range [new_min, new_max] along axis or axes indicated.

    Integer input is promoted to float. Constant slices (zero range) map to
    new_min instead of dividing by zero.
    """
    data = np.asarray(data)
    if np.issubdtype(data.dtype, np.integer):
        # Fix: np.float was removed in NumPy 1.24; use the builtin float.
        data = data.astype(float)
    data_min = np.min(data, axis=axis, keepdims=True)
    data_range = np.max(data, axis=axis, keepdims=True) - data_min
    # where= guards against zero ranges; those positions keep the 0 from out=
    x = np.divide(data - data_min, data_range, where=data_range > 0, out=np.zeros_like(data))
    new_range = new_max - new_min
    return (x * new_range) + new_min
def clip(data, output_min=0, output_max=1, input_min=0, input_max=255):
    """Clip an array to a given range, then rescale the clipped array from the input range to the output range.

    Parameters
    ----------
    data : numpy.ndarray
        The data to clip and rescale
    output_min : int | float | np.number
        The minimum value for the output data (the default is 0)
    output_max : int | float | np.number
        The maximum value for the output data (the default is 1)
    input_min : int | float | np.number
        The minimum value for the input data range (the default is 0)
    input_max : int | float | np.number
        The maximum value for the input data range (the default is 255)
    """
    # TODO - Add tests for this
    clipped = np.clip(data, input_min, input_max)
    input_span = input_max - input_min
    # Affine map [input_min, input_max] -> [output_min, output_max]
    scaler = (output_max - output_min) / input_span
    bias = output_min - input_min * scaler
    return clipped * scaler + bias
def sigmoid(x, epsilon=1e-7):
    """Return the sigmoid of the given value/array (epsilon pads both numerator and denominator)."""
    denominator = 1.0 + np.exp(-x) + epsilon
    return (1.0 + epsilon) / denominator
def inv_sigmoid(x, epsilon=1e-7):
    """Return the inverse of the sigmoid function (logit) for the given value/array."""
    ratio = (x + epsilon) / (1 - x + epsilon)
    return np.log(ratio)
def softmax(x, axis=None):
    """Return the softmax of the array

    Parameters
    ----------
    x : numpy.ndarray
        The data to apply the softmax to
    axis : int | list of ints
        The axis or axes to apply softmax across
    """
    x = np.asarray(x)
    if np.issubdtype(x.dtype, np.integer):
        # Fix: np.float was removed in NumPy 1.24; use the builtin float.
        x = x.astype(float)
    # Subtract the max for numerical stability before exponentiating
    s = np.max(x, axis=axis, keepdims=True)
    e_x = np.exp(x - s)
    div = np.sum(e_x, axis=axis, keepdims=True)
    return np.divide(e_x, div, where=div != 0, out=np.zeros_like(x))
def normalize(data, axis=None):
    """Return data normalized to have zero mean and unit variance along axis or axes indicated.

    Constant slices (zero standard deviation) map to zero.
    """
    data = np.asarray(data)
    if np.issubdtype(data.dtype, np.integer):
        # Fix: np.float was removed in NumPy 1.24; use the builtin float.
        data = data.astype(float)
    mean = np.mean(data, axis=axis, keepdims=True)
    stddev = np.std(data, axis=axis, keepdims=True)
    return np.divide(data - mean, stddev, where=stddev != 0, out=np.zeros_like(data))
def roc_curve(label, pred, as_rates=True):
    """Get the ROC curve for the data.
    Parameters
    ----------
    label : numpy.ndarray
        The ground truth values
    pred : numpy.ndarray
        The predicted values
    as_rates : bool
        Whether to return true/false positive rates (divided by the totals)
        or raw counts/scores (the default is True)
    Returns
    -------
    fps : numpy.ndarray
        The false positive rates/scores
    tps : numpy.ndarray
        The true positive rates/scores
    thresh : numpy.ndarray
        The thresholds for each fps/tps
    """
    if not isinstance(label, np.ndarray):
        label = np.array(label)
    if not isinstance(pred, np.ndarray):
        pred = np.array(pred)
    label = np.ravel(label)
    pred = np.ravel(pred)
    # Sort samples by descending score (mergesort keeps tied scores stable)
    desc_score_indices = np.argsort(pred, kind='mergesort')[::-1]
    y_score = pred[desc_score_indices]
    y_true = label[desc_score_indices]
    # Candidate thresholds: last index of each run of equal scores,
    # plus the final sample
    distinct_idx = np.where(np.diff(y_score))[0]
    thresh_idx = np.concatenate([distinct_idx, [y_true.size - 1]])
    # Cumulative true positives at each candidate threshold; false positives
    # are the remaining predicted-positive samples (+1 for 0-based indices)
    tps = np.cumsum(y_true)
    # expected = np.sum(y_true)
    tps = tps[thresh_idx]
    fps = 1 + thresh_idx - tps
    thresh = y_score[thresh_idx]
    # Prepend the "nothing predicted positive" point so the curve starts at 0
    tps = np.concatenate(([0], tps))
    fps = np.concatenate(([0], fps))
    thresh = np.concatenate(([1], thresh))
    if as_rates:
        # Normalize counts by the totals (last entries) to get rates
        fpr = fps / fps[-1]
        tpr = tps / tps[-1]
        return fpr, tpr, thresh
    else:
        return fps, tps, thresh
def mcc_curve(label, pred, optimal_only=False):
    """Get the Matthew's Correlation Coefficient for different thresholds

    Parameters
    ----------
    label : numpy.ndarray
        Expected labels for the data samples
    pred : numpy.ndarray
        Predicted labels for the data samples
    optimal_only : bool
        If true, returns only the value and threshold for the greatest MCC value
    """
    # Raw counts (not rates) are required by the MCC formula downstream
    false_pos, true_pos, cutoffs = roc_curve(label, pred, as_rates=False)
    return optimal_mcc_from_roc(false_pos, true_pos, cutoffs,
                                optimal_only=optimal_only)
def optimal_mcc_from_roc(fps, tps, thresholds, optimal_only=True):
    """Get the Matthew's Correlation Coefficient for different thresholds

    Parameters
    ----------
    fps : numpy.ndarray
        False positive scores from the roc curve
    tps : numpy.ndarray
        True positive scores from the roc curve
    thresholds : numpy.ndarray
        Thresholds from the roc curve
    optimal_only : bool
        If true, returns only the value and threshold for the greatest MCC value
    """
    # Total sample count and the overall positive-label rate (S)
    total = tps[-1] + fps[-1]
    pos_rate = tps[-1] / total
    # Predicted-positive rate (P) at every threshold
    pred_rate = (fps + tps) / total
    numerator = (tps / total) - (pos_rate * pred_rate)
    denominator = np.sqrt(pred_rate * pos_rate * (1 - pos_rate) * (1 - pred_rate))
    # Zero out the degenerate endpoints where the denominator vanishes
    mcc = np.divide(numerator, denominator,
                    out=np.zeros_like(numerator), where=denominator != 0)
    if not optimal_only:
        return mcc, thresholds
    best = np.argmax(mcc)
    return mcc[best], thresholds[best]
def accuracy_curve(label, pred, return_peak=False):
    """Get the accuracy values for each possible threshold in the predictions.
    Parameters
    ----------
    label : numpy.ndarray
        The true values for each sample in the data.
    pred : numpy.ndarray
        The predicted values for each sample in the data
    return_peak : bool
        Whether to return the peak accuracy and best threshold for the data as well as the curve
    """
    if not isinstance(label, np.ndarray):
        label = np.array(label)
    if not isinstance(pred, np.ndarray):
        pred = np.array(pred)
    # Sort samples by descending score (mergesort keeps tied scores stable)
    desc_score_indices = np.argsort(pred, kind='mergesort')[::-1]
    y_score = pred[desc_score_indices]
    y_true = label[desc_score_indices]
    # One candidate threshold per distinct score value, plus the last sample
    distinct_idx = np.where(np.diff(y_score))[0]
    thresh_idx = np.concatenate([distinct_idx, [y_true.size - 1]])
    thresh = y_score[thresh_idx]
    # True positives among samples scored at/above each threshold
    tps = np.cumsum(y_true)[thresh_idx]
    # True negatives via suffix sums of the negative labels.
    # NOTE(review): the suffix sum is inclusive of the threshold index itself,
    # so the boundary sample is counted as a true negative even though it is
    # predicted positive — looks like a possible off-by-one; confirm the
    # intended >= vs > threshold convention before relying on exact values.
    tns = np.cumsum((1 - y_true)[::-1])[::-1][thresh_idx]
    correct = tps + tns
    acc = correct / label.size
    if return_peak:
        peak = np.argmax(acc)
        return acc, thresh, acc[peak], thresh[peak]
    return acc, thresh
def spec_at_sens(expected, predicted, sensitivities=None):
    """Get the peak specificity for each sensitivity.

    Parameters
    ----------
    expected : array-like
        Ground-truth binary labels
    predicted : array-like
        Predicted scores
    sensitivities : float or iterable of float, optional
        Minimum sensitivities at which to report the best specificity;
        defaults to [0.95]

    Returns
    -------
    list
        Best specificity achievable at each requested sensitivity
    """
    # Fix: the default was a mutable list ([0.95]); use None as sentinel.
    if sensitivities is None:
        sensitivities = [0.95]
    elif not hasattr(sensitivities, '__iter__'):
        sensitivities = [sensitivities]
    fpr, tpr, thresholds = roc_curve(expected, predicted)
    # Specificity is 1 - FPR; restrict to points meeting the recall floor.
    # NOTE(review): raises on an empty selection if no threshold reaches
    # min_sens — unchanged from the original behavior.
    specs = [np.max((1 - fpr)[tpr >= min_sens]) for min_sens in sensitivities]
    return specs
def get_confusion_stats(label, pred, threshold=0.5):
    """Get the true positive, false positive, true negative, and false negative values for the given data"""
    label = np.squeeze(label)
    pred = np.squeeze(pred)
    actual_pos = label.astype(bool)
    predicted_pos = pred >= threshold
    true_pos = np.logical_and(actual_pos, predicted_pos).sum()
    true_neg = np.logical_and(~actual_pos, ~predicted_pos).sum()
    # Remaining predicted positives/negatives are the false ones
    false_pos = predicted_pos.sum() - true_pos
    false_neg = (~predicted_pos).sum() - true_neg
    return true_pos, false_pos, true_neg, false_neg
def dice_coef(label, pred, threshold=0.5):
    """Sorenson Dice coefficient: 2*TP / (2*TP + FP + FN); 0 when undefined."""
    tp, fp, tn, fn = get_confusion_stats(label, pred, threshold)
    denominator = 2 * tp + fp + fn
    return 0 if denominator == 0 else (2 * tp) / denominator
def jaccard_coef(label, pred, threshold=0.5):
    """Jaccard coefficient: TP / (TP + FP + FN); 0 when undefined."""
    tp, fp, tn, fn = get_confusion_stats(label, pred, threshold)
    denominator = tp + fn + fp
    return 0 if denominator == 0 else tp / denominator
def value_crossing(array, threshold=0, positive_crossing=True, negative_crossing=True, return_indices=False):
"""Get the count of instances where a series crosses a value.
Parameters
----------
array : np.ndarray
A sequential array of values
threshold : int | float
The value used as a crossing point (the default is 0)
positive_crossing : bool
Whether to count when the sequence goes from less than to greater than the threshold value (the default is True)
negative_crossing : bool
Whether to count when the sequence goes from greater than to less than the threshold value (the default is True)
return_indices : bool
Whether to return the indices of the points immediately before the crossings
"""
if not isinstance(array, np.ndarray):
array = | np.array(array) | numpy.array |
"""
The optics module provides simulations of the optics of imaging systems for microscopy
**Conventions:**
arrays follow the ZXY convention, with
- Z : depth axis (axial, focus axis)
- X : horizontal axis (lateral)
- Y : vertical axis (lateral, rotation axis when relevant)
"""
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
# This file is part of CBI Toolbox.
#
# CBI Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the 3-Clause BSD License.
#
# CBI Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# 3-Clause BSD License for more details.
#
# You should have received a copy of the 3-Clause BSD License along
# with CBI Toolbox. If not, see https://opensource.org/licenses/BSD-3-Clause.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
import astropy.units as u
import numpy as np
import poppy
import scipy.interpolate
from cbi_toolbox.simu import primitives
from cbi_toolbox import utils
def create_wf_1d(wf_object, upsampling=1, scale=1, copy=False):
    """
    Create a 1D wavefront object from an existing wavefront

    Parameters
    ----------
    wf_object : poppy.FresnelWavefront
        the original wavefront
    upsampling : int, optional
        upsampling factor (does not change the field of view), by default 1
    scale : int, optional
        zoom factor (changes the field of view), by default 1
    copy : bool, optional
        return a new object, by default False

    Returns
    -------
    poppy.FresnelWavefront
        a 1D wavefront full of 1 with same properties as the input
    """
    if copy:
        wf_object = wf_object.copy()
    # Flat (all-ones) single-row field, widened by the upsampling factor
    width = int(wf_object.shape[1] * upsampling)
    field = np.ones((1, width), dtype=wf_object.wavefront.dtype)
    # Pixel coordinates centered on the middle of the row
    row_idx, col_idx = np.indices(field.shape, dtype=float)
    col_idx -= field.shape[1] / 2
    wf_object._x = col_idx
    wf_object._y = row_idx
    wf_object.wavefront = field
    # Upsampling refines the grid; scale zooms the field of view
    wf_object.pixelscale = wf_object.pixelscale / upsampling * scale
    wf_object.n = field.shape[1]
    return wf_object
def wf_to_2d(wf_object, npix=None, copy=False):
    """
    Convert a 1D wavefront to 2D (for plotting only)

    Parameters
    ----------
    wf_object : poppy.FresnelWavefront
        the 1D wavefront
    npix : int, optional
        crop to a size of npix, by default None
    copy : bool, optional
        return a new object, by default False

    Returns
    -------
    poppy.FresnelWavefront
        the 2D wavefront
    """
    if copy:
        wf_object = wf_object.copy()
    size = wf_object.shape[1] if npix is None else npix
    center = wf_object.shape[1] // 2
    half = size // 2
    # Place the (cropped) 1D profile on the middle row of a square array
    expanded = np.zeros_like(wf_object.wavefront, shape=(size, size))
    expanded[half, :] = wf_object.wavefront[:, center - half:center + half]
    wf_object.wavefront = expanded
    # Rebuild centered pixel coordinate grids for the square field
    wf_object._y, wf_object._x = np.indices(wf_object.shape, dtype=float)
    wf_object._y -= wf_object.shape[0] / 2.0
    wf_object._x -= wf_object.shape[0] / 2.0
    return wf_object
def wf_mix(wf1, wf2, ref=None):
    """
    Compute a 2D wavefront by multiplying 2 1D wavefronts (for separable propagation)

    Parameters
    ----------
    wf1 : poppy.FresnelWavefront
        a 1D wavefront
    wf2 : poppy.FresnelWavefront
        a 1D wavefront
    ref : poppy.FresnelWavefront, optional
        reference wavefront for the parameters of the output, by default None (wf1 will be used)

    Returns
    -------
    poppy.FresnelWavefront
        the 2D mixed wavefront

    Raises
    ------
    ValueError
        if the input wavefronts have different pixelscales
    """
    if wf1.pixelscale != wf2.pixelscale:
        raise ValueError("The pixelscale of the input wavefronts must match")
    profile_a = wf1.wavefront.squeeze()
    profile_b = wf2.wavefront.squeeze()
    # Outer product builds the separable 2D field from the two 1D profiles
    mixed = np.outer(profile_b, profile_a)
    template = wf1 if ref is None else ref
    wf_m = template.copy()
    wf_m.wavefront = mixed
    return wf_m
def resample_wavefront(wf, pixelscale, npixels):
    """
    Resample 1D wavefront to new pixelscale
    (adapted from poppy.poppy_core._resample_wavefront_pixelscale)

    The wavefront is modified in place and also returned for convenience.

    Parameters
    ----------
    wf : poppy.FresnelWavefront
        a 1D wavefront
    pixelscale : astropy.units.[distance] / astropy.units.pixel
        target pixelscale
    npixels : int
        target size in pixels

    Returns
    -------
    poppy.FresnelWavefront
        the same (mutated) 1D wavefront, resampled and resized
    """
    pixscale_ratio = (wf.pixelscale / pixelscale).decompose().value

    def make_axis(npix, step):
        """ Helper function to make coordinate axis for interpolation """
        return step * (np.arange(-npix // 2, npix // 2, dtype=np.float64))

    # Input and output axes for interpolation. The interpolated wavefront will be evaluated
    # directly onto the detector axis, so don't need to crop afterwards.
    x_in = make_axis(wf.shape[1], wf.pixelscale.to(u.m / u.pix).value)
    x_out = make_axis(npixels.value, pixelscale.to(u.m / u.pix).value)

    def interpolator(arr):
        """
        Bind arguments to scipy's interp1d function (out-of-range samples fill with 0).
        """
        return scipy.interpolate.interp1d(
            x_in, arr, kind='slinear', copy=False, fill_value=0,
            assume_sorted=True, bounds_error=False)

    # Interpolate real and imaginary parts separately
    real_resampled = interpolator(wf.wavefront.real)(x_out)
    imag_resampled = interpolator(wf.wavefront.imag)(x_out)
    new_wf = real_resampled + 1j * imag_resampled

    # enforce conservation of energy:
    new_wf *= 1. / pixscale_ratio

    wf.ispadded = False  # if a pupil detector, avoid auto-cropping padded pixels on output
    wf.wavefront = new_wf
    wf.pixelscale = pixelscale
    # Fix: the docstring promised the resampled wavefront but the function
    # returned None; returning *wf* is backward-compatible for callers that
    # ignore the result.
    return wf
def openspim_illumination(wavelength=500e-9, refr_index=1.333, laser_radius=1.2e-3,
                          objective_na=0.3, objective_focal=18e-3, slit_opening=10e-3,
                          pixelscale=635e-9, npix_fov=512, rel_thresh=None,
                          simu_size=2048, oversample=16):
    """
    Compute the illumination function of an OpenSPIM device
    Parameters
    ----------
    wavelength : float, optional
        illumination wavelength in meters, by default 500e-9
    refr_index : float, optional
        imaging medium refraction index, by default 1.333
    laser_radius : float, optional
        source laser radius in meters, by default 1.2e-3
    objective_na : float, optional
        illumination objective NA, by default 0.3
    objective_focal : float, optional
        illumination objective focal length in meters, by default 18e-3
    slit_opening : float, optional
        vertical slit opening in meters, by default 10e-3
    pixelscale : float, optional
        target pixelscale in meters per pixel, by default 1.3e-3/2048
    npix_fov : int, optional
        target size in pixels, by default 512
    rel_thresh: float, optional
        relative threshold to crop the beam thickness
        if a full row is below this theshold, all rows after are removed
        will be computed as compared to the maximum pixel
    simu_size : int, optional
        size of the arrays used for simulation, by default 2048
    oversample : int, optional
        oversampling used for the simulation (must be increased sith simu_size), by default 16
    Returns
    -------
    array [ZXY]
        the illumination function
    """
    # Attach astropy units so the poppy optical systems receive dimensioned inputs
    pixel_width = 1
    wavelength *= u.m
    laser_radius *= u.m
    objective_focal *= u.m
    pixelscale *= (u.m / u.pixel)
    slit_opening *= u.m
    noop = poppy.ScalarTransmission()
    beam_ratio = 1 / oversample
    fov_pixels = npix_fov * u.pixel
    # NOTE(review): this detector system is built but never propagated below —
    # confirm whether it is intentionally unused.
    detector = poppy.FresnelOpticalSystem()
    detector.add_detector(fov_pixels=fov_pixels, pixelscale=pixelscale)
    # We approximate the objective aperture with a square one to make it separable
    # Given the shape of the wavefront, we estimate the generated error to be negligible
    objective_radius = math.tan(
        math.asin(objective_na / refr_index)) * objective_focal
    objective_aperture = poppy.RectangleAperture(name='objective aperture',
                                                 width=2 * objective_radius,
                                                 height=2 * objective_radius)
    objective_lens = poppy.QuadraticLens(
        f_lens=objective_focal, name='objective lens')
    obj_aperture = poppy.FresnelOpticalSystem()
    obj_aperture.add_optic(objective_aperture, objective_focal)
    # Implement the objective lens separately to be able to account for refractive index change
    obj_lens = poppy.FresnelOpticalSystem()
    obj_lens.add_optic(objective_lens)
    # Computed as following: going through T1 then CLens then T2
    # is equivalent to going through CLens with focal/4
    # Then the radius is computed as the Fourier transform of the input beam, per 2F lens system
    w0_y = (12.5e-3 * u.m * wavelength) / (2 * np.pi ** 2 * laser_radius)
    laser_shape_y = poppy.GaussianAperture(w=w0_y, pupil_diam=5 * w0_y)
    path_y = poppy.FresnelOpticalSystem(
        pupil_diameter=2 * w0_y, npix=pixel_width, beam_ratio=beam_ratio)
    path_y.add_optic(laser_shape_y)
    # Going through T1, slit and T2 is equivalent to going through a half-sized slit,
    # then propagating 1/4 the distance
    # Since we use 1D propagation, we can increase oversampling a lot for better results
    laser_shape_z = poppy.GaussianAperture(
        w=laser_radius, pupil_diam=slit_opening / 2)
    slit = poppy.RectangleAperture(
        name='Slit', width=slit_opening / 2, height=slit_opening / 2)
    path_z = poppy.FresnelOpticalSystem(
        pupil_diameter=slit_opening / 2, npix=pixel_width, beam_ratio=beam_ratio)
    path_z.add_optic(laser_shape_z)
    path_z.add_optic(slit)
    path_z.add_optic(noop, 0.25 * 100e-3 * u.m)
    # Propagate 1D signals
    wf_z = path_z.input_wavefront(wavelength=wavelength)
    create_wf_1d(wf_z, upsampling=simu_size)
    path_z.propagate(wf_z)
    wf_y = path_y.input_wavefront(wavelength=wavelength)
    create_wf_1d(wf_y, upsampling=simu_size, scale=10)
    path_y.propagate(wf_y)
    obj_aperture.propagate(wf_z)
    obj_aperture.propagate(wf_y)
    # Divide by the refractive index: wavelength inside the imaging medium
    wf_z.wavelength /= refr_index
    wf_y.wavelength /= refr_index
    obj_lens.propagate(wf_z)
    obj_lens.propagate(wf_y)
    illumination = np.empty(
        (npix_fov, npix_fov, npix_fov), dtype=wf_z.intensity.dtype)
    # Make sure it is centered even if pixels are odd or even
    offset = 0 if npix_fov % 2 else 0.5
    # Propagate both 1D profiles to each depth plane, resample them onto the
    # detector grid, and mix them into one 2D slice per plane
    for pix in range(npix_fov):
        pixel = pix - npix_fov // 2 + offset
        distance = pixel * pixelscale * u.pixel
        psf = poppy.FresnelOpticalSystem()
        psf.add_optic(noop, objective_focal + distance)
        wfc_y = wf_y.copy()
        wfc_z = wf_z.copy()
        psf.propagate(wfc_y)
        psf.propagate(wfc_z)
        resample_wavefront(wfc_y, pixelscale, fov_pixels)
        resample_wavefront(wfc_z, pixelscale, fov_pixels)
        mix = wf_mix(wfc_y, wfc_z)
        mix.normalize()
        illumination[:, pix, :] = mix.intensity
    if rel_thresh is not None:
        illumination = utils.threshold_crop(
            illumination, rel_thresh, 0)
    # Normalize by the mean of the sums along the first (Z) axis
    return illumination / illumination.sum(0).mean()
def gaussian_psf(npix_lateral=129, npix_axial=129,
pixelscale=635e-9, wavelength=500e-9,
numerical_aperture=0.5, refraction_index=1.33):
"""
Compute an approximate PSF model based on gaussian beam propagation
<NAME>., <NAME>., <NAME>. et al. Gaussian Light Model in Brightfield
Optical Projection Tomography. Sci Rep 9, 13934 (2019).
https://bib-ezproxy.epfl.ch:5295/10.1038/s41598-019-50469-6
Parameters
----------
npix_lateral : int, optional
number of pixels in the lateral direction, by default 129
npix_axial : int, optional
number of pixels in the axial direction, by default 129
pixelscale : float, optional
pixelscale in meters per pixel, by default 1.3e-3/2048
wavelength : float, optional
illumination wavelength in meters, by default 500e-9
numerical_aperture : float, optional
objective NA, by default 0.5
refraction_index : float, optional
imaging medium NA, by default 1.33
Returns
-------
array [ZXY]
the gaussian PSF
"""
# compensate for even/odd pixels so that the PSF is always centered
odd_l = npix_lateral % 2
odd_a = npix_axial % 2
lat_offset = 0 if odd_l else 0.5
ax_offset = 0 if odd_a % 2 else 0.5
r_coords = ( | np.arange((npix_lateral + 1) // 2) | numpy.arange |
# -*- coding: utf-8 -*-
"""Calculate distances to points and compare how the simulated points match
the obtained TOADSuite points.
Notes
-----
1) Some coordinate system transformations need to be done between TOADSuite and
simulated positions - check again.
Created on Thu Jun 11 14:08:53 2020
Acknowledgements
---------------
Thanks to <NAME> for running the simulated audio files on the TOADSuite!
@author: tbeleyur
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import scipy.spatial
import tacost
from tacost.calculate_toa import standard_tristar
def calculate_euclidean_error(row_xyz):
    """Return the Euclidean norm of *row_xyz* (distance from the origin)."""
    squared = row_xyz ** 2.0
    return np.sqrt(squared.sum())
def draw_mic_array(mic_xyz, draw_axis):
    """Draw thin black line segments between every pair of microphones on a 3D axis."""
    for mic_a in mic_xyz:
        for mic_b in mic_xyz:
            segment = np.row_stack((mic_a, mic_b))
            draw_axis.plot(segment[:, 0], segment[:, 1], segment[:, 2],
                           'k', linewidth=0.5)
    return draw_axis
#### Tristar points ##########################################################
# load the simulated points:
tristar_point_raw = pd.read_csv('../tristar_source_positions.csv')
# load the obtained output
toadsuite_out = pd.read_excel('../toadsuite_results/CORRECTED_RES_fig1_tristar.xlsx', sheet_name='0000001_001')
# keep only columns 15-17 (x, y, z estimates); rows before index 3 are
# skipped — presumably header/metadata rows, TODO confirm against the sheet
toadsuite_xyz = toadsuite_out.iloc[3:,[15,16,17]].copy()
#remove last point bcos for some reason the TOADSuite output doesn't have the last point??!
tristar_points = tristar_point_raw.to_numpy()
toadsuite_points = np.float64(toadsuite_xyz.to_numpy())
# absolute per-axis error, then its Euclidean norm per source position
tristar_diff = np.abs(tristar_points - toadsuite_points)
tristar_euclidean_error = np.apply_along_axis(calculate_euclidean_error, 1, tristar_diff)
# relative error
# (error normalized by each source's distance from the array origin/mic 0)
distance_from_mic0 = tristar_point_raw.apply(calculate_euclidean_error, 1)
tristar_relative_error = tristar_euclidean_error/distance_from_mic0
# assemble [toadsuite xyz | simulated xyz | euclidean error] for later use
points_and_error = np.column_stack((tristar_points, tristar_euclidean_error))
all_data = np.column_stack((toadsuite_points, points_and_error))
import os
import unittest
import pytest
import rasterio
import numpy as np
from geopyspark.geotrellis import SpatialKey, Tile, SpatialPartitionStrategy, RasterizerOptions, Metadata
from shapely.geometry import Polygon, box
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis.layer import TiledRasterLayer
from geopyspark.geotrellis.constants import LayerType
class MaskTest(BaseTestClass):
pysc = BaseTestClass.pysc
cells = np.zeros((1, 2, 2))
cells.fill(1)
layer = [(SpatialKey(0, 0), Tile(cells, 'FLOAT', -1.0)),
(SpatialKey(1, 0), Tile(cells, 'FLOAT', -1.0,)),
(SpatialKey(0, 1), Tile(cells, 'FLOAT', -1.0,)),
(SpatialKey(1, 1), Tile(cells, 'FLOAT', -1.0,))]
rdd = pysc.parallelize(layer)
extent = {'xmin': 0.0, 'ymin': 0.0, 'xmax': 4.0, 'ymax': 4.0}
layout = {'layoutCols': 2, 'layoutRows': 2, 'tileCols': 2, 'tileRows': 2}
metadata = {'cellType': 'float32ud-1.0',
'extent': extent,
'crs': 4326,
'bounds': {
'minKey': {'col': 0, 'row': 0},
'maxKey': {'col': 1, 'row': 1}},
'layoutDefinition': {
'extent': extent,
'tileLayout': layout}}
geoms = [box(0.0, 0.0, 2.0, 2.0), box(3.0, 3.0, 4.0, 4.0)]
raster_rdd = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, Metadata.from_dict(metadata))
@pytest.fixture(autouse=True)
def tearDown(self):
yield
BaseTestClass.pysc._gateway.close()
def test_geotrellis_mask(self):
result = self.raster_rdd.mask(geometries=self.geoms).to_numpy_rdd()
n = result.map(lambda kv: np.sum(kv[1].cells)).reduce(lambda a, b: a + b)
self.assertEqual(n, 2.0)
def test_rdd_mask_no_partition_strategy(self):
rdd = BaseTestClass.pysc.parallelize(self.geoms)
result = self.raster_rdd.mask(rdd, options=RasterizerOptions(True, 'PixelIsArea')).to_numpy_rdd()
n = result.map(lambda kv: | np.sum(kv[1].cells) | numpy.sum |
#!/usr/bin/env python
from __future__ import print_function
import sys, os
import pickle, shutil
import re, string, time
import random
import numpy as np
from scipy import optimize
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import angles
import h5py
import polar
import pandas as pd
def calc_R(c, x, y):
    """Distance of each (x, y) point from the candidate centre *c*."""
    xc, yc = c
    dx = x - xc
    dy = y - yc
    return np.sqrt(dx ** 2 + dy ** 2)  # an array of individual distances
def f_2(c, x, y):
    """Circle-fit residuals: deviation of each radius from the mean radius.

    If all points lay on a perfect circle centred at *c*, this is all zeros.
    """
    radii = calc_R(c, x, y)
    return radii - radii.mean()
class Bubble_properties():
    """
    Handle bubble (domain wall) properties for a sequence of contours
    as a function of time.

    For every switch time the contour is circle-fitted, converted to polar
    coordinates around the center of the *first* contour, and a pandas
    DataFrame is built with angles as rows, (real) times as columns, and
    the distance from the center as values.
    """
    def __init__(self, contours, times=None, start_time=0, normalize_angles=False):
        """
        Parameters
        ----------
        contours : dict
            maps switch time -> (N, 2) array of contour points
        times : sequence, optional
            real times, indexable by switch value (used for the DataFrame
            columns); falls back to the contour keys when omitted
        start_time : int, optional
            first switch time; must be present in the times
        normalize_angles : bool, optional
            map angles to [0, 2*pi) instead of (-pi, pi]
        """
        if times is not None:
            self.times = times
        elif isinstance(contours, dict):
            self.times = contours.keys()
        else:
            print("Missing times. Exit")
            sys.exit()
        if start_time not in self.times:
            print("Start time is not correct")
            sys.exit()
        # Per-switch geometry, plus a DataFrame of radii (rows: angles, cols: times)
        self.dws = {}
        self.df = pd.DataFrame()
        # Fix: dict_keys is not subscriptable in Python 3 (switches[:-1]
        # below raised TypeError); materialize the keys as a list.
        # NOTE(review): insertion order is assumed to be chronological.
        switches = list(contours.keys())
        diff_switches = np.diff(switches)
        self._center0 = None  # center of the first contour, set in the loop
        for i, switch in enumerate(switches[:-1]):
            self.dws[switch] = {}
            contour = contours[switch]
            x, y = np.transpose(contour)
            center = self._fit_center(x, y)
            self.dws[switch]['center'] = center
            if not i:
                center0 = center
                self._center0 = center0
            # Angles computed once (the original computed them twice);
            # resample onto a power-of-two number of angles (FFT friendly)
            thetas = self._get_angles(x, y, center0, normalize=normalize_angles)
            n_new_thetas = 2 ** (int(np.round(np.log(len(thetas)) / np.log(2))))
            new_thetas = np.linspace(-np.pi, np.pi, n_new_thetas)
            order = np.argsort(thetas)
            thetas = thetas[order]
            self.dws[switch]['radius'] = self._fit_radius(x, y, center0)
            self.dws[switch]['angle'] = thetas
            self.dws[switch]['dist'] = self._get_distances(contour)
            r = self._get_distances_from_center(contour, center0)
            r = r[order]
            self.dws[switch]['dist_from_center'] = r
            self.dws[switch]['dw'] = contour
            if i:
                new_r = np.interp(new_thetas, thetas, r)
                # Missing switches in between reuse the same resampled radii
                for step in range(diff_switches[i]):
                    tm = times[switch + step]
                    self.df[tm] = new_r
        # Fix: the original set_index(new_thetas, 'thetas') passed 'thetas'
        # positionally as the *drop* argument (a TypeError in pandas >= 2);
        # use an explicitly named Index instead.
        self.df = self.df.set_index(pd.Index(new_thetas, name='thetas'))
    @property
    def events(self):
        # Not implemented; kept for API compatibility (returns None).
        pass
    def _fit_center(self, x, y):
        """Least-squares circle-center fit of the contour points."""
        center_estimate = np.mean(x), np.mean(y)
        (xc_m, yc_m), ier = optimize.leastsq(f_2, center_estimate, args=(x, y))  # done by scipy
        # NOTE(review): leastsq returns ier in 1..4 on success, so this
        # check effectively never fires; kept for parity with the original.
        if not ier:
            print("There is a problem to fit the center of the bubble")
        return xc_m, yc_m
    def _fit_radius(self, x, y, center):
        """Mean distance of the points from *center* (the fitted radius)."""
        return calc_R(center, x, y).mean()
    def _get_angles(self, x, y, center, normalize=False):
        """Angle of each point around *center*, in (-pi, pi] (or [0, 2*pi) if normalized)."""
        xc, yc = center
        X, Y = x - xc, y - yc
        _angles = np.arctan2(Y, X)
        if normalize:
            _angles = np.array([angles.normalize(a, 0, 2 * np.pi) for a in _angles])
        return _angles
    def _get_distances(self, contour):
        """Distance from each contour point to the next (last entry is 0)."""
        # along the contours, each point's distance from the next is 1 or Sqrt[2]/2
        dist = np.sqrt((np.diff(contour, axis=0) ** 2).sum(axis=1))
        dist = np.append(dist, 0.)  # 1st & last point are the same
        return dist
    def _get_distances_from_center(self, contour, center):
        """Distance of every contour point from *center*."""
        xc, yc = center
        x, y = contour[:, 0], contour[:, 1]
        distCenter = ((x - xc) ** 2 + (y - yc) ** 2) ** 0.5
        return distCenter
    def _get_max_displacement(self):
        """Log the largest drift of the fitted center w.r.t. the first contour.

        Fix: the original referenced undefined names x0, y0 (NameError) and
        measured the contour points instead of the centers; the baseline is
        now the stored center of the first contour.
        """
        tmax, dmax = -1, -1
        x0, y0 = self._center0
        for switch in self.dws:
            xc, yc = self.dws[switch]['center']
            dnew = np.sqrt((xc - x0) ** 2 + (yc - y0) ** 2)
            if dnew > dmax:
                tmax = switch
                dmax = dnew
        str0 = "Max center displacement is %5.3f at switch time %s" % (dmax, tmax)
        str1 = "%5.3f percent of bubble (at t=%d) radius\n\n" % (100*dmax/self.dws[tmax]['radius'], tmax)
        sys.stderr.write("%s, or %s" % (str0, str1))
class CalcSq():
"""
Calculus of the G4 and chi4 values using a dataframe
df has the rows given by the angles of the points
and columns given by the times
"""
def __init__(self, df):
self.mean_radius = df.mean().values
self.times = df.columns
self.thetas = df.index
# Calculus of the delta_h for all the thetas and times
self.h = df
self.dh = self.h - self.mean_radius #
def _calc_S_q(self, ref_i=(3,40), zeta=2./3):
"""
Calculation of the structure factor
As a matter of fact it is a power spectrum in the q space
As the data are done for the angles theta, the first S_q
is calculate for a q which is in terms of angular distance
The calculus for space (along the circle) has to be performed
using first the data at theta which give different r
at different times, so we need to interpolate the data
"""
slope = 1 + 2 * zeta
N_thetas, N_times = self.h.shape
d_theta = self.h.index[1] - self.h.index[0]
# Calculate the q for the theta angles
q_theta = np.linspace(0.0, 1.0/(2.0*d_theta), N_thetas//2)
hq = self.h.apply(np.fft.fft, axis=0)
hq_conj = hq.apply(np.conjugate)
sq = | np.real(hq * hq_conj) | numpy.real |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
    """Issue an asynchronous NetCDF data request to the OOI M2M interface.

    Submits the request, then polls the async status endpoint every 3 seconds
    (up to 400 tries, i.e. ~20 minutes) until the data are ready.

    :param uframe_dataset_name: reference-designator path of the data set
    :param start_date: beginDT bound (ISO-8601 string)
    :param end_date: endDT bound (ISO-8601 string)
    :return: JSON dict describing the request URLs, or None if the initial
        request was rejected
    """
    options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
    r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
    if r.status_code == requests.codes.ok:
        data = r.json()
    else:
        return None
    # wait until the request is completed
    print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
    url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
    check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # BUG FIX: 'elapsed' was previously assigned only in the else
            # branch, so a request completing on the first poll raised a
            # NameError at the print below.  Compute it unconditionally.
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
    return data
def M2M_Files(data, tag=''):
    """
    Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    Collected data is gathered into an xarray dataset for further processing.

    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: the collected data as an xarray dataset
    """
    # The THREDDS catalog URL is the entry in allURLs that mentions 'thredds'
    thredds_urls = [u for u in data['allURLs'] if re.match(r'.*thredds.*', u)]
    return list_files(thredds_urls[0], tag)
def list_files(url, tag=''):
    """
    Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.

    :param url: URL to user's THREDDS catalog specific to a data request
    :param tag: regex pattern used to distinguish files of interest
    :return: list of files in the catalog with the URL path set relative to the catalog
    """
    pattern = re.compile(tag)
    page = requests.get(url).text
    soup = BeautifulSoup(page, 'html.parser')
    hrefs = []
    # collect the href of every anchor whose text matches the tag pattern
    for node in soup.find_all('a', text=pattern):
        hrefs.append(node.get('href'))
    return hrefs
def M2M_Data(nclist, variables):
    """Fetch the requested variables from each NetCDF file via OPeNDAP.

    For every URL in *nclist*, opens the corresponding OPeNDAP endpoint and
    appends each variable's samples to the matching ``var`` object in
    *variables*.  The first variable is assumed to be time in seconds since
    1900-01-01 and is converted to pandas timestamps.

    :param nclist: list of THREDDS catalog URLs for the requested files
    :param variables: structtype of ``var`` objects with ``.name`` set
    :return: tuple (variables, time_converted)
    """
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist is going to contain more than one url eventually
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the catalog host prefix
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        try:
            # NOTE: iterate by explicit index -- structtype.__getitem__
            # auto-grows when indexed at len(), so direct iteration over
            # 'variables' would never terminate.
            for ii in range(len(variables)):
                dum = openFile.variables[variables[ii].name]
                variables[ii].data = np.append(variables[ii].data, dum[:].data)
        finally:
            # BUG FIX: the Dataset handle was never closed, leaking one file
            # handle/connection per requested file.
            openFile.close()
    # time is 'seconds since 1900-01-01': convert to days, then to timestamps
    tmp = variables[0].data / 60 / 60 / 24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
class var(object):
    """Generic holder for a named data array with units."""
    def __init__(self):
        """A Class that generically holds data with a variable name
        and the units as attributes"""
        self.name = ''        # variable name as used in the NetCDF file
        self.data = np.array([])  # accumulated samples
        self.units = ''       # physical units of the samples
    def __repr__(self):
        lines = [
            "name: " + self.name,
            "units: " + self.units,
            "data: size: " + str(self.data.shape),
        ]
        return "\n".join(lines)
class structtype(object):
    """A class that imitates a Matlab structure type.

    Indexing one position past the current end appends a fresh ``var``,
    so the container can be filled sequentially: s[0], s[1], ...
    """
    def __init__(self):
        self._data = []
    def __getitem__(self, index):
        """Implement index behavior in the struct (auto-grow by one slot)."""
        current_len = len(self._data)
        if index == current_len:
            self._data.append(var())
        return self._data[index]
    def __len__(self):
        return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS — surface-wave statistics (WAVSS_Stats), RecoveredHost method, buoy nodes.
# All four moorings use the same SBD12/05-WAVSSA000 instrument designator and the
# same 15-variable set: wave counts, heights (m), periods (sec) and mean
# direction/spread (degrees).
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT — single-point velocity meter, RecoveredHost method.
# Six BUOY (surface) branches followed by six NSIF (near-surface frame)
# branches.  All share a 9-variable set: velocities (m/s), attitude in
# deci-degrees (note index 5 is roll and index 6 is pitch, per the stream's
# parameter names), temperature in 0.01 degC and pressure in 0.001 dbar.
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
# NOTE(review): this BUOY branch points at the RID16 (NSIF) designator and a
# non-dcl stream, with the dcl stream left commented out below — unlike the
# other BUOY branches (SBD11/SBD17).  Looks deliberate but verify against the
# OOI data catalog for CE06ISSM.
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
# NSIF (near-surface instrument frame) branches: same variable set, RID16/RID26 designators.
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W — seawater pCO2 sensor, RecoveredHost method (NSIF and MFN nodes).
# Three variables per branch: time, thermistor temperature (degC) and
# pCO2 of seawater (uatm).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN — seawater pH sensor, RecoveredHost method.
# Six NSIF branches followed by four MFN branches; each requests time,
# thermistor temperature (degC) and pH (unitless).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
# MFN (seafloor) branches: same variable set on the MFD35 designator.
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR — spectral irradiance sensor, RecoveredHost method, NSIF nodes.
# Two variables per branch: time and the downwelling irradiance vector
# (uW cm-2 nm-1).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
    # PRESF -- seafloor pressure/tide measurements (abs_seafloor_pressure, seawater_temperature), RecoveredHost method
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
    # CTDBP -- CTD datasets (temp, salinity, density, pressure, conductivity), RecoveredHost method
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
    # VEL3D -- 3-D turbulent velocity components plus pressure (mbar-scaled), RecoveredHost method
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
    # PCO2A -- partial pressure of CO2 in surface seawater and atmosphere plus CO2 flux, RecoveredHost method
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
    # OPTAA -- absorption/attenuation instrument streams; only the time variable is requested here, RecoveredHost method
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    # NUTNR -- SUNA nitrate concentration (raw and salinity-corrected, umol/L), RecoveredHost method
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
# --- CTDBP on the MFN (seafloor) node, recovered-instrument stream ---
# Same six CTD variables; note CE09OSSM uses the -CTDBPE000 series
# instrument while CE07SHSM uses -CTDBPC000.
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
# --- CTDPF on the CE09OSPM wire-following profiler, recovered-WFP stream ---
# Variable names carry the ctdpf_ckl_ prefix specific to this instrument series.
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
# --- ADCP on the NSIF node, recovered-instrument earth-velocity stream ---
# Eight variables: time, bin depths, heading/pitch/roll, and the three
# earth-referenced seawater velocity components.
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
# --- ADCP on the MFN (seafloor) node, recovered-instrument earth-velocity stream ---
# Same eight variables as the NSIF ADCP branches; the instrument series
# differs per mooring (ADCPTM / ADCPTC / ADCPSJ).
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
# --- ZPLSC on the MFN node, recovered-instrument echogram stream ---
# Only the time coordinate is requested for these echogram datasets.
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# --- VELPT on the BUOY node, recovered-instrument stream ---
# Nine variables: time, E/N/U velocity, heading/roll/pitch (deci-degrees),
# temperature (0.01degC) and pressure (0.001dbar). Node designator is
# SBD17 on the inshore moorings and SBD11 on the others.
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
# --- VELPT on the NSIF node, recovered-instrument stream ---
# Same nine variables as the BUOY VELPT branches; node designator is
# RID16 on the inshore moorings and RID26 on the others.
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
# --- VEL3D on the CE09OSPM wire-following profiler, recovered-WFP stream ---
# Point velocity (vel3d_k series) plus attitude and the co-located CTD pressure.
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
# --- VEL3D on the MFN node, recovered-instrument turbulent-velocity stream ---
# Five variables: time, E/N/U turbulent velocity, and pressure in mbar
# (hence the 0.001dbar units tag).
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
# --- PRESF on the MFN node, recovered-instrument tide-measurement stream ---
# Three variables: time, tide pressure (dbar), tide temperature (degC).
# Instrument series differs per mooring (PRESFA / PRESFB / PRESFC).
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
# --- PHSEN (seawater pH) on the NSIF node, recovered-instrument stream ---
# Three variables: time, thermistor temperature (degC), pH (unitless).
# Node designator is RID16 on the inshore moorings and RID26 on the others.
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    # CSPP (Coastal Surface-Piercing Profiler) datasets below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
    # OPTAA on the CE02SHSP CSPP profiler: only time and the interpolated CTD
    # pressure are mapped here; no optical channels are requested in this branch.
    uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'int_ctd_pressure'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
    # Same two-variable OPTAA mapping as above, for the CE07SHSP profiler.
    uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'int_ctd_pressure'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'dbar'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
    # Glider CTD mapping (telemetered), platform GL386. The same 8-variable
    # pattern — time, temperature, salinity, density, pressure, conductivity,
    # lat, lon — is repeated verbatim below for each glider (GL384, GL383,
    # GL382, GL381, GL327, GL326, GL320, GL319, GL312, GL311, GL247) and for
    # both the 'Telemetered' and 'RecoveredHost' methods; only the platform
    # segment of the uFrame path changes.
    uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'sci_water_temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'sci_seawater_density'
    var_list[4].name = 'sci_water_pressure_dbar'
    var_list[5].name = 'sci_water_cond'
    var_list[6].name = 'lat'
    var_list[7].name = 'lon'
    # Empty placeholders until the data request is executed elsewhere.
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
    var_list[6].units = 'degree_north'
    var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
    # Glider dissolved-oxygen (DOSTA) mapping, platform GL386, telemetered.
    # 6 variables: time, oxygen concentration (umol/L), absolute oxygen
    # (umol/kg), interpolated CTD pressure, lat, lon. As with the CTD branches,
    # this pattern repeats verbatim below for the other gliders and for the
    # 'RecoveredHost' method (which uses the recovered_host stream path).
    uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'sci_oxy4_oxygen'
    var_list[2].name = 'sci_abs_oxygen'
    var_list[3].name = 'int_ctd_pressure'
    var_list[4].name = 'lat'
    var_list[5].name = 'lon'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/kg'
    var_list[3].units = 'dbar'
    var_list[4].units = 'degree_north'
    var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient '
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient '
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
# Coastal Pioneer coastal surface mooring (CSM) data streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
# WAVSS surface-wave spectra instrument streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
# WAVSS non-directional wave spectra (telemetered) — CP01CNSM buoy.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
    var_list[0].name = 'time'
    var_list[1].name = 'number_bands'
    var_list[2].name = 'initial_frequency'
    var_list[3].name = 'frequency_spacing'
    var_list[4].name = 'psd_non_directional'
    var_list[5].name = 'wavss_a_non_directional_frequency'
    # Placeholders; data is filled in by the downstream request logic.
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = '1'
    var_list[2].units = 'Hz'
    var_list[3].units = 'Hz'
    var_list[4].units = 'm2 Hz-1'
    var_list[5].units = 'Hz'
# WAVSS non-directional wave spectra (recovered host) — same variables as the
# telemetered branch above, different UFrame stream path.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'number_bands'
    var_list[2].name = 'initial_frequency'
    var_list[3].name = 'frequency_spacing'
    var_list[4].name = 'psd_non_directional'
    var_list[5].name = 'wavss_a_non_directional_frequency'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = '1'
    var_list[2].units = 'Hz'
    var_list[3].units = 'Hz'
    var_list[4].units = 'm2 Hz-1'
    var_list[5].units = 'Hz'
# WAVSS buoy-motion time series (telemetered) — heave/north/east displacement
# offsets plus magnetically corrected x/y buoy motion.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
    var_list[0].name = 'time'
    var_list[1].name = 'number_time_samples'
    var_list[2].name = 'initial_time'
    var_list[3].name = 'time_spacing'
    var_list[4].name = 'solution_found'
    var_list[5].name = 'heave_offset_array'
    var_list[6].name = 'north_offset_array'
    var_list[7].name = 'east_offset_array'
    var_list[8].name = 'wavss_a_buoymotion_time'
    var_list[9].name = 'wavss_a_magcor_buoymotion_x'
    var_list[10].name = 'wavss_a_magcor_buoymotion_y'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = '1'
    var_list[2].units = 'sec'
    var_list[3].units = 'sec'
    var_list[4].units = '1'
    var_list[5].units = 'm'
    var_list[6].units = 'm'
    var_list[7].units = 'm'
    var_list[8].units = 'seconds since 1900-01-01'
    var_list[9].units = 'm'
    var_list[10].units = 'm'
# WAVSS buoy-motion time series (recovered host) — identical variable table,
# recovered-host stream path.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'number_time_samples'
    var_list[2].name = 'initial_time'
    var_list[3].name = 'time_spacing'
    var_list[4].name = 'solution_found'
    var_list[5].name = 'heave_offset_array'
    var_list[6].name = 'north_offset_array'
    var_list[7].name = 'east_offset_array'
    var_list[8].name = 'wavss_a_buoymotion_time'
    var_list[9].name = 'wavss_a_magcor_buoymotion_x'
    var_list[10].name = 'wavss_a_magcor_buoymotion_y'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = '1'
    var_list[2].units = 'sec'
    var_list[3].units = 'sec'
    var_list[4].units = '1'
    var_list[5].units = 'm'
    var_list[6].units = 'm'
    var_list[7].units = 'm'
    var_list[8].units = 'seconds since 1900-01-01'
    var_list[9].units = 'm'
    var_list[10].units = 'm'
# WAVSS Fourier-coefficient wave spectra (telemetered).
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
    var_list[0].name = 'time'
    var_list[1].name = 'number_bands'
    var_list[2].name = 'initial_frequency'
    var_list[3].name = 'frequency_spacing'
    var_list[4].name = 'number_directional_bands'
    var_list[5].name = 'initial_directional_frequency'
    var_list[6].name = 'directional_frequency_spacing'
    var_list[7].name = 'fourier_coefficient_2d_array'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = '1'
    var_list[2].units = 'Hz'
    var_list[3].units = 'Hz'
    var_list[4].units = '1'
    var_list[5].units = 'Hz'
    var_list[6].units = 'Hz'
    var_list[7].units = '1'
# WAVSS Fourier-coefficient wave spectra (recovered host) — same table,
# recovered-host stream path.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'number_bands'
    var_list[2].name = 'initial_frequency'
    var_list[3].name = 'frequency_spacing'
    var_list[4].name = 'number_directional_bands'
    var_list[5].name = 'initial_directional_frequency'
    var_list[6].name = 'directional_frequency_spacing'
    var_list[7].name = 'fourier_coefficient_2d_array'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = '1'
    var_list[2].units = 'Hz'
    var_list[3].units = 'Hz'
    var_list[4].units = '1'
    var_list[5].units = 'Hz'
    var_list[6].units = 'Hz'
    var_list[7].units = '1'
# PCO2A — surface-water/atmosphere pCO2 (telemetered) for the three Pioneer
# surface moorings. All branches share the same 4-variable table.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
    var_list[0].name = 'time'
    var_list[1].name = 'partial_pressure_co2_ssw'
    var_list[2].name = 'partial_pressure_co2_atm'
    var_list[3].name = 'pco2_co2flux'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'uatm'
    var_list[2].units = 'uatm'
    var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
    uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
    var_list[0].name = 'time'
    var_list[1].name = 'partial_pressure_co2_ssw'
    var_list[2].name = 'partial_pressure_co2_atm'
    var_list[3].name = 'pco2_co2flux'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'uatm'
    var_list[2].units = 'uatm'
    var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
    uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
    var_list[0].name = 'time'
    var_list[1].name = 'partial_pressure_co2_ssw'
    var_list[2].name = 'partial_pressure_co2_atm'
    var_list[3].name = 'pco2_co2flux'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'uatm'
    var_list[2].units = 'uatm'
    var_list[3].units = 'mol m-2 s-1'
# PCO2A — recovered-host variants of the three branches above.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'partial_pressure_co2_ssw'
    var_list[2].name = 'partial_pressure_co2_atm'
    var_list[3].name = 'pco2_co2flux'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'uatm'
    var_list[2].units = 'uatm'
    var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'partial_pressure_co2_ssw'
    var_list[2].name = 'partial_pressure_co2_atm'
    var_list[3].name = 'pco2_co2flux'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'uatm'
    var_list[2].units = 'uatm'
    var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'partial_pressure_co2_ssw'
    var_list[2].name = 'partial_pressure_co2_atm'
    var_list[3].name = 'pco2_co2flux'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'uatm'
    var_list[2].units = 'uatm'
    var_list[3].units = 'mol m-2 s-1'
# FDCHP — direct-covariance flux instrument. Only the time coordinate is
# requested for each of the three delivery methods.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
    uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
# METBK1 hourly bulk-flux products (CP01CNSM, SBD11 logger) — 17 derived
# meteorological variables; 'met_timeflx' is the hourly flux timestamp.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# Recovered-host variant of the branch above (same variable table).
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# METBK1 hourly bulk-flux products for CP03ISSM — identical variable table to
# the CP01CNSM METBK1-hr branches, platform-specific stream path.
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
    uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# Recovered-host variant (same variable table).
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# METBK1 hourly bulk-flux products for CP04OSSM — identical variable table to
# the other METBK1-hr branches, platform-specific stream path.
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
    uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# Recovered-host variant (same variable table).
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# METBK2 hourly bulk-flux products (CP01CNSM second METBK, SBD12 logger) —
# same 17-variable table as METBK1-hr but sourced from the SBD12 instrument.
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# Recovered-host variant (same variable table).
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
    var_list[0].name = 'met_timeflx'
    var_list[1].name = 'met_rainrte'
    var_list[2].name = 'met_buoyfls'
    var_list[3].name = 'met_buoyflx'
    var_list[4].name = 'met_frshflx'
    var_list[5].name = 'met_heatflx'
    var_list[6].name = 'met_latnflx'
    var_list[7].name = 'met_mommflx'
    var_list[8].name = 'met_netlirr'
    var_list[9].name = 'met_rainflx'
    var_list[10].name = 'met_sensflx'
    var_list[11].name = 'met_sphum2m'
    var_list[12].name = 'met_stablty'
    var_list[13].name = 'met_tempa2m'
    var_list[14].name = 'met_tempskn'
    var_list[15].name = 'met_wind10m'
    var_list[16].name = 'met_netsirr_hourly'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[8].data = np.array([])
    var_list[9].data = np.array([])
    var_list[10].data = np.array([])
    var_list[11].data = np.array([])
    var_list[12].data = np.array([])
    var_list[13].data = np.array([])
    var_list[14].data = np.array([])
    var_list[15].data = np.array([])
    var_list[16].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'mm/hr'
    var_list[2].units = 'W/m2'
    var_list[3].units = 'W/m2'
    var_list[4].units = 'mm/hr'
    var_list[5].units = 'W/m2'
    var_list[6].units = 'W/m2'
    var_list[7].units = 'N/m2'
    var_list[8].units = 'W/m2'
    var_list[9].units = 'W/m2'
    var_list[10].units = 'W/m2'
    var_list[11].units = 'g/kg'
    var_list[12].units = 'unitless'
    var_list[13].units = 'degC'
    var_list[14].units = 'degC'
    var_list[15].units = 'm/s'
    var_list[16].units = 'W/m2'
# CTDBP on the CP01CNSM near-surface instrument frame (RID27). The DCL
# (telemetered / recovered_host) streams use short variable names; the
# recovered_inst stream uses 'ctdbp_seawater_*' names for the same quantities.
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
    uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'pressure'
    var_list[5].name = 'conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'pressure'
    var_list[5].name = 'conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
    uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'ctdbp_seawater_temperature'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'ctdbp_seawater_pressure'
    var_list[5].name = 'ctdbp_seawater_conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
# CTDBP on the CP03ISSM NSIF (RID27) — same three delivery methods and
# variable tables as the CP01CNSM NSIF CTD branches.
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
    uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'pressure'
    var_list[5].name = 'conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'pressure'
    var_list[5].name = 'conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
    uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'ctdbp_seawater_temperature'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'ctdbp_seawater_pressure'
    var_list[5].name = 'ctdbp_seawater_conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
# CTDBP on the CP04OSSM NSIF (RID27) — same three delivery methods and
# variable tables as the other NSIF CTD branches.
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
    uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'pressure'
    var_list[5].name = 'conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
    uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'temp'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'pressure'
    var_list[5].name = 'conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
    uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'ctdbp_seawater_temperature'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'ctdbp_seawater_pressure'
    var_list[5].name = 'ctdbp_seawater_conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = | np.array([]) | numpy.array |
import tensorflow as tf
import pdb
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy
import myParams
def getHome():
    """Return the root data directory used on the current machine."""
    # Earlier setups lived under /home/deni/ and /media/a/... mounts.
    home_dir = '/opt/data/'
    return home_dir
def getDatasetsBase():
    """Return the base directory that holds the TF datasets."""
    # An earlier setup used /home/deni/.
    base_dir = '/media/a/H1/TFDatasets/'
    return base_dir
def getParam_tmpF(s):
    """Parse a parameter-file value string.

    Tries, in order: int, float, comma-separated int array,
    comma-separated float array; falls back to the raw string.
    """
    # Scalar parses first (int before float, as the original chain did).
    for parse in (int, float):
        try:
            return parse(s)
        except ValueError:
            pass
    # Then comma-separated vectors, again int before float.
    for parse in (int, float):
        try:
            return np.array([parse(tok) for tok in s.split(',')])
        except ValueError:
            pass
    # Nothing numeric: hand back the string unchanged.
    return s
def readParamsTxt(ParamFN):
    """Load a whitespace-separated key/value parameter file.

    Each parsed value is stored both in a local dict and in the global
    myParams.myDict registry. Lines shorter than 3 characters are skipped.
    """
    ParamsD = {}
    with open(ParamFN) as fh:
        for raw in fh:
            if len(raw) < 3:
                continue
            # Append ' a' so split(maxsplit=2) always yields three fields,
            # even when the line has only "key value".
            key, val, _ = (raw + ' a').split(maxsplit=2)
            parsed = getParam_tmpF(val)
            ParamsD[key] = parsed
            myParams.myDict[key] = parsed
def getparam(S):
    """Look up parameter S in the global myParams.myDict.

    Returns the stored value, or 0 (after printing a message) when the
    key is absent. A missing dict key raises KeyError — the original
    caught ValueError, which a dict lookup never raises, so the
    fallback branch was dead and missing keys crashed the caller.
    """
    try:
        return myParams.myDict[S]
    except KeyError:
        print('Couldnt find parameter: ' + S)
        return 0
def setparam(S, V):
    """Store value V under key S in the global myParams.myDict."""
    myParams.myDict[S] = V
# Split a complex tensor into real and imaginary parts and concatenate them
# along `dim` (TF1 API: tf.real/tf.imag).
def ConcatCOnDim(X,dim):
    # return tf.cast(tf.concat([tf.real(X),tf.imag(X)],axis=dim),tf.float32)
    return tf.concat([tf.real(X),tf.imag(X)],axis=dim)
# Fixed-axis shorthands for ConcatCOnDim, one per axis 0..7.
def ConcatRIOn0(X): return tf.concat([tf.real(X),tf.imag(X)],axis=0)
def ConcatRIOn1(X): return tf.concat([tf.real(X),tf.imag(X)],axis=1)
def ConcatRIOn2(X): return tf.concat([tf.real(X),tf.imag(X)],axis=2)
def ConcatRIOn3(X): return tf.concat([tf.real(X),tf.imag(X)],axis=3)
def ConcatRIOn4(X): return tf.concat([tf.real(X),tf.imag(X)],axis=4)
def ConcatRIOn5(X): return tf.concat([tf.real(X),tf.imag(X)],axis=5)
def ConcatRIOn6(X): return tf.concat([tf.real(X),tf.imag(X)],axis=6)
def ConcatRIOn7(X): return tf.concat([tf.real(X),tf.imag(X)],axis=7)
# Like ConcatCOnDim, but each part first gains a new singleton axis at `dim`,
# so the real/imag pair ends up stacked along a freshly created dimension.
def ConcatCOnDimWithStack(X,dim):
    # return tf.cast(tf.concat([tf.stack([tf.real(X)],axis=dim),tf.stack([tf.imag(X)],axis=dim)],axis=dim),tf.float32)
    return tf.concat([tf.stack([tf.real(X)],axis=dim),tf.stack([tf.imag(X)],axis=dim)],axis=dim)
def NP_ConcatCOnDim(X, dim):
    """Concatenate the real then imaginary part of X along `dim`, as float32."""
    stacked = np.concatenate((np.real(X), np.imag(X)), axis=dim)
    return stacked.astype(np.float32)
# Fixed-axis shorthands for NP_ConcatCOnDim (real/imag concat as float32).
def NP_ConcatRIOn0(X): return NP_ConcatCOnDim(X,0)
def NP_ConcatRIOn1(X): return NP_ConcatCOnDim(X,1)
def NP_ConcatRIOn2(X): return NP_ConcatCOnDim(X,2)
def NP_ConcatRIOn3(X): return NP_ConcatCOnDim(X,3)
def NP_ConcatRIOn4(X): return NP_ConcatCOnDim(X,4)
def NP_ConcatRIOn5(X): return NP_ConcatCOnDim(X,5)
def NP_ConcatRIOn6(X): return NP_ConcatCOnDim(X,6)
def NP_fft2d_on6d(X):
    """2-D FFT of a 6-D array over its first two axes."""
    # Equivalent to the transpose/fft2/transpose-back dance: fft2 supports axes=.
    return np.fft.fft2(X, axes=(0, 1))
def NP_ifft2d_on6d(X):
    """Inverse 2-D FFT of a 6-D array over its first two axes."""
    return np.fft.ifft2(X, axes=(0, 1))
# def RItoCon4(X):
# return tf.squeeze(tf.complex(tf.slice(X,[0,0,0,0],[-1,-1,-1,1]),tf.slice(X,[0,0,0,1],[-1,-1,-1,1])))
# def RItoCon4(X):
# return tf.squeeze(tf.complex(tf.slice(X,[0,0,0,0],[batch_size,H,W,1]),tf.slice(X,[0,0,0,1],[batch_size,H,W,1])))
def NP_addDim(X): return np.stack([X],axis=-1)
# Rank-expansion helpers: append a singleton axis at the given position
# (TF1 API). TF_addDim appends at the end; TF_kd_to_(k+1)d insert at axis k.
def TF_addDim(X): return tf.stack([X],axis=-1)
def TF_2d_to_3d(X): return tf.stack([X],axis=2)
def TF_3d_to_4d(X): return tf.stack([X],axis=3)
def TF_4d_to_5d(X): return tf.stack([X],axis=4)
def TF_5d_to_6d(X): return tf.stack([X],axis=5)
def TF_2d_to_4d(X): return TF_3d_to_4d(TF_2d_to_3d(X))
def TF_2d_to_5d(X): return TF_4d_to_5d(TF_3d_to_4d(TF_2d_to_3d(X)))
def TF_3d_to_5d(X): return TF_4d_to_5d(TF_3d_to_4d(X))
# 2-D (I)FFT over the FIRST two axes of an N-d tensor: tf.fft2d only
# transforms the last two axes, so transpose them last and back again.
def TF_fft2d_on5d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,3,4,0,1])),[3,4,0,1,2])
def TF_ifft2d_on5d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,3,4,0,1])),[3,4,0,1,2])
def TF_fft2d_on6d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,3,4,5,0,1])),[4,5,0,1,2,3])
def TF_ifft2d_on6d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,3,4,5,0,1])),[4,5,0,1,2,3])
def TF_fft2d_on7d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,3,4,5,6,0,1])),[5,6,0,1,2,3,4])
def TF_ifft2d_on7d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,3,4,5,6,0,1])),[5,6,0,1,2,3,4])
# Generic-rank variants: N is the tensor rank, permutation built with numpy.
def TF_fft2d_onNd(X,N): return tf.transpose(tf.fft2d(tf.transpose(X,np.concatenate((np.arange(2,N),[0,1]),axis=0))),np.concatenate(([N-2,N-1],np.arange(0,N-2)),axis=0))
def TF_ifft2d_onNd(X,N): return tf.transpose(tf.ifft2d(tf.transpose(X,np.concatenate((np.arange(2,N),[0,1]),axis=0))),np.concatenate(([N-2,N-1],np.arange(0,N-2)),axis=0))
def TF_fft2d_on3d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,0,1])),[1,2,0])
def TF_ifft2d_on3d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,0,1])),[1,2,0])
# Mean absolute value of a tensor (note: NOT an RMS despite the name; see TF_rms).
def tfrm(X): return tf.reduce_mean(tf.abs(X))
def rms(X): return np.sqrt(np.mean(np.square(np.abs(X))))
def TF_rms(X): return tf.sqrt(tf.reduce_mean(tf.square(tf.abs(X))))
def QuickCompare(Ref, X):
    """Return [rms(Ref), rms(X), rms(error), SNR-like ratio] for a quick diff."""
    err = rms(Ref - X)
    return [rms(Ref), rms(X), err, rms(Ref) / err]
def toep(X, Kern, H, W):
    """Apply a Toeplitz operator via FFT: crop of IFFT2(FFT2(zero-pad(X)) * Kern).

    X is padded by (H, W) on the bottom/right before the transform; the top-left
    H x W corner of the filtered result is returned.
    """
    padded = np.pad(X, ((0, H), (0, W)), 'constant')
    filtered = np.fft.ifft2(np.fft.fft2(padded, axes=(0, 1)) * Kern, axes=(0, 1))
    return filtered[:H, :W]
# TensorFlow version of toep(): crop of IFFT2(FFT2(zero-pad(X)) * Kern).
# NOTE(review): tf.pad is given a tuple-of-tuples here rather than a tensor/list;
# assumed to be accepted by the TF1 API in use — confirm.
def TF_toep(X,Kern,H,W):
    return tf.ifft2d(tf.fft2d(tf.pad(X,((0,H),(0,W)),'constant'))*Kern)[:H,:W]
def cgp(x0, A, b, mit, stol, bbA):
    """Conjugate-gradient solve of A x = b (identity preconditioner).

    Follows the Wikipedia preconditioned-CG template with C = I; the
    original kept the preconditioner scaffolding (ha/hp/hpp mirroring
    ra/rp/rpp), which is removed here without changing the iteration.

    Parameters
    ----------
    x0   : initial guess.
    A    : opaque operator data, passed through to bbA.
    b    : right-hand side.
    mit  : maximum iteration count; note the check fires BEFORE the
           k-th update, so at most mit-1 updates are applied (preserved
           from the original).
    stol : stopping tolerance on the residual RMS.
    bbA  : callable bbA(A, x) applying the operator to x.

    Returns (x, k): solution estimate and iteration counter.
    """
    def _rms(v):
        # Same metric as the module-level rms(), kept local so the solver
        # is self-contained.
        return np.sqrt(np.mean(np.square(np.abs(v))))

    x = x0
    r = b - bbA(A, x0)          # initial residual
    r_prev = None               # residual from the previous iteration
    d = None                    # current search direction
    k = 0
    while _rms(r) > stol:
        k = k + 1
        if k == mit:
            # was "no conversion" — the intended word is convergence
            print('GCP:MAXIT: mit reached, no convergence.')
            return x, k
        rho = np.sum(np.conj(r) * r)
        if k == 1:
            d = r
        else:
            # beta = rho_k / rho_{k-1} (Fletcher-Reeves form)
            d = r + (rho / np.sum(np.conj(r_prev) * r_prev)) * d
        Ad = bbA(A, d)
        alpha = rho / np.sum(np.conj(d) * Ad)
        x = x + alpha * d
        r_prev = r
        r = r - alpha * Ad
    return x, k
def TF_cgp(x0, A, b, mit, stol, bbA):
    """TensorFlow twin of cgp(): CG solve of A x = b with bbA(A, x) as the operator.

    NOTE(review): the `while TF_rms(ra) > stol` comparison on a tensor only
    works eagerly / on concrete values — confirm this is not traced into a graph.
    """
    x = x0;
    # ha/hp/hpp mirror ra/rp/rpp: leftovers of the preconditioned template with C = I.
    ha = 0;
    hp = 0;
    hpp = 0;
    ra = 0;
    rp = 0;
    rpp = 0;
    u = 0;
    k = 0;
    ra = b - bbA(A, x0); # <--- ra = b - A * x0;
    while TF_rms(ra) > stol:
        ha=ra
        k = k + 1;
        # Early exit fires BEFORE the k-th update, so at most mit-1 updates run.
        if (k == mit):
            print('GCP:MAXIT: mit reached, no conversion.');
            return x,k
        hpp = hp;
        rpp = rp;
        hp = ha;
        rp = ra;
        # t = <r_k, r_k>; the else-branch divides by <r_{k-1}, r_{k-1}> (CG beta).
        t = tf.reduce_sum(tf.conj(rp)*hp)
        if k == 1:
            u = hp;
        else:
            u = hp + (t / tf.reduce_sum(tf.conj(rpp)*hpp)) * u;
        Au = bbA(A, u) # <--- Au = A * u;
        Fac=tf.reduce_sum(tf.conj(u)*Au)
        a = t / Fac
        x = x + a * u;
        ra = rp - a * Au;
    return x,k
def NP_NUFFT_forw(X, SN, P, H, W):
    """Forward NUFFT (NumPy): scale by SN, zero-pad to (2H, 2W), FFT,
    then interpolate onto the trajectory with the sparse matrix P.

    P is expected to be a scipy sparse matrix, so `P * vector` is a mat-vec.
    """
    gridded = np.pad(X * SN, ((0, H), (0, W)), 'constant')
    return P * np.fft.fft2(gridded).ravel()
# def back(X,SN,P,H,W):
# return np.fft.ifft2(np.reshape(np.conj(P.T)*X,((H*2,W*2))),axes=(0,1))[:H,:W]*np.conj(SN)
# Adjoint NUFFT: spread trajectory samples X back onto the (2H, 2W) grid with
# P^H, inverse-FFT, crop to (H, W), and undo the SN scaling.
# NOTE(review): np.conj(np.transpose(P)) on a scipy sparse P relies on
# numpy dispatching to the matrix's methods — confirm against the scipy
# version in use before touching this line.
def NP_NUFFT_back(X,SN,P,H,W):
    return (np.fft.ifft2(np.reshape(np.conj(np.transpose(P))*X,(H*2,W*2)))[:H,:W])*np.conj(SN)
# Normal operator A^H W A: forward NUFFT, per-sample weighting by Wx, adjoint.
def NP_NUFFT_forwWback(X,Wx,SN,P,H,W):
    return NP_NUFFT_back(NP_NUFFT_forw(X,SN,P,H,W)*Wx,SN,P,H,W)
def NP_NUFFTHNUFFT_WithW(I,SN,P,CurW,H,W):
    """Unrolled normal operator A^H diag(CurW) A applied to image I.

    Same computation as NP_NUFFT_forwWback (forward NUFFT, weight by CurW,
    adjoint NUFFT) written out step by step; used by NUFFT_to_ToepKern to
    probe the operator with unit impulses.
    """
    Step1=I*SN
    Pad=np.pad(Step1,((0,H),(0,W)),'constant')
    F=np.fft.fft2(Pad)
    Col=np.reshape(F,(-1))
    Sig=P*Col
    Sig=Sig*CurW
    # Out=back(Sig,SN,P,H,W)
    # Adjoint: P^H, reshape back to the (2H, 2W) grid, inverse FFT, crop, un-scale.
    Step1=np.conj(np.transpose(P))*Sig
    Step1=np.reshape(Step1,(H*2,W*2))
    F=np.fft.ifft2(Step1)
    Cropped=F[:H,:W]
    Out=Cropped*np.conj(SN)
    return Out
def NUFFT_to_ToepKern(Wx,SN,P,H,W):
    """Build the (2H, 2W) frequency-domain Toeplitz kernel of A^H diag(Wx) A.

    Probes the weighted normal operator with unit impulses at the four image
    corners, tiles the responses into an embedding of the operator's kernel,
    recenters it with np.roll, and returns its 2-D FFT. The result can then
    be applied with toep()/TF_toep instead of a full NUFFT round trip.
    """
    # NUFFT to ToepKern
    v11=np.zeros((H,W),np.complex128)
    v12=np.zeros((H,W),np.complex128)
    v21=np.zeros((H,W),np.complex128)
    v22=np.zeros((H,W),np.complex128)
    # Unit impulses at the four corners of the image grid.
    v11[0,0]=1
    v12[0,-1]=1
    v21[-1,0]=1
    v22[-1,-1]=1
    block11=NP_NUFFTHNUFFT_WithW(v11,SN,P,Wx,H,W)
    block12=NP_NUFFTHNUFFT_WithW(v12,SN,P,Wx,H,W)
    block21=NP_NUFFTHNUFFT_WithW(v21,SN,P,Wx,H,W)
    block22=NP_NUFFTHNUFFT_WithW(v22,SN,P,Wx,H,W)
    # Tile the corner responses into the doubled grid (overlapping by one
    # row/column), then roll so the kernel origin lands at (0, 0).
    Big=np.zeros((H*2,W*2),np.complex128)
    Big[:H,:W]=block22;
    Big[H-1:-1,W-1:-1]=block11;
    Big[:H,W-1:-1]=block21;
    Big[H-1:-1,:W]=block12;
    Bigc=np.roll(Big,(-H+1,-W+1),(0,1))
    TKern=np.fft.fft2(Bigc)
    return TKern
# QuickCompare(TKern,TKern1)
def _glorot_initializer_g(units, stddev_factor=1.0):
    """Initialization in the style of Glorot 2010.
    stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
    # NOTE(review): this uses sqrt(factor / sqrt(prod(shape))) rather than the
    # textbook Glorot sqrt(2 / (fan_in + fan_out)) — presumably intentional
    # for this project; confirm before reusing elsewhere.
    stddev = np.sqrt(stddev_factor / np.sqrt(np.prod(units)))
    return tf.truncated_normal(units,mean=0.0, stddev=stddev)
""" Example use of TF_TSNUFFT:
B0Data=scipy.io.loadmat('/media/a/H1/MoreDataForTFNUFT.mat')
Sens=B0Data['Sens']
TSBF=B0Data['TSBF']
TSC=B0Data['TSC']
NUFTData=scipy.io.loadmat('/media/a/DATA/180628_AK/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/TrajForNUFT.mat')
Kd=NUFTData['Kd']
P=NUFTData['P']
SN=NUFTData['SN']
Trajm2=NUFTData['Trajm2']
SmpI=scipy.io.loadmat('/media/a/H1/SmpI.mat')
SmpI=SmpI['SmpI']
nTraj=Trajm2.shape[1]
nCh=Sens.shape[2]
nTSC=TSC.shape[2]
SNc,paddings,sp_R,sp_I,TSBFX=GT.TF_TSNUFFT_Prepare(SN,Sens,TSC,TSBF,Kd,P)
Out=GT.TF_TSNUFFT_Run(SmpI,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX)
SOut={}
SOut['Out']=Out
scipy.io.savemat('/media/a/H1/TFTSNUFTOut.mat',SOut)
"""
# def TS_NUFFT_OPHOP(InImage,TSCSens,H,W,batch_size,paddingsY,nTSC,nCh,fftkernc5D):
# InImage=tf.stack([tf.stack([InImage],axis=3)],axis=4)
# InImage=tf.transpose(InImage,[1,2,3,4,0])
# Step1=tf.multiply(InImage,TSCSens)
# Padded=tf.pad(Step1, paddingsY, "CONSTANT")
# Step2=tf.transpose(tf.fft2d(tf.transpose(Padded,perm=[2,3,4,0,1])),[3,4,0,1,2])
# Step2=tf.multiply(Step2,fftkernc5D)
# Step2=tf.transpose(tf.ifft2d(tf.transpose(Step2,perm=[2,3,4,0,1])),[3,4,0,1,2])
# Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,nTSC,nCh,batch_size])
# Step3=tf.multiply(Cropped,tf.conj(TSCSens))
# Step3=tf.reduce_sum(Step3,axis=[2,3])
# Step3=tf.transpose(Step3,[2,0,1])
# return Step3
def blocksToFftkern(block1, block2):
    """Assemble a real FFT kernel from two (N1, N2) Toeplitz corner blocks.

    The two blocks are tiled (with their conjugate-flipped mirrors) into a
    (2*N1, 2*N2) embedding, Hermitian-symmetrized by averaging with its own
    conjugate reflection, and returned as the real part of its 2-D FFT.
    """
    (N1, N2) = block1.shape
    zeros_col = np.zeros((N1, 1))
    zeros_col_s = np.zeros((N1 - 1, 1))
    # Top half: block1 next to the conjugate mirror built from both blocks.
    top = np.concatenate((block1, zeros_col, np.conj(np.flip(np.concatenate((block1[0:1, 1:], block2[1:, 1:]), axis=0), axis=1))), axis=1)
    # Bottom half: flipped block2 next to the doubly-flipped conjugate of block1.
    bottom = np.concatenate((np.flip(block2[1:, :], axis=0), zeros_col_s, np.flip(np.flip(np.conj(block1[1:, 1:]), axis=0), axis=1)), axis=1)
    embed = np.concatenate((top, np.zeros((1, N2 * 2)), bottom), axis=0)
    mirror = np.conj(np.flip(np.flip(np.roll(np.roll(embed, -1, axis=0), -1, axis=1), axis=0), axis=1))
    # Average with the conjugate reflection so the kernel is Hermitian,
    # which makes its FFT real up to round-off.
    sym = (embed + mirror) / 2
    return np.real(np.fft.fft2(sym))
def GetTSCoeffsByLinear(N, L):
    """Return an (N, L) matrix of linear-interpolation (hat-basis) weights.

    Column i holds the i-th piecewise-linear basis function, defined on L
    evenly spaced knots over [0, 1], sampled at N evenly spaced points.
    Rows therefore sum to 1.
    """
    # Bare `import scipy` (as done at the top of this file) does not guarantee
    # that the scipy.interpolate subpackage is loaded; import it explicitly.
    from scipy.interpolate import interp1d
    M = np.zeros((N, L))
    knots = np.linspace(0, 1, L)
    xnew = np.linspace(0, 1, N)
    for i in range(L):
        basis = np.zeros(L)
        basis[i] = 1
        M[:, i] = interp1d(knots, basis)(xnew)
    return M
def NP_Cartesian_OPHOP_ITS_MB(InImage, Sens6, Msk):
    """Cartesian normal operator A^H M A (NumPy), multi-TSC / multiband.

    InImage: (batch, H, W, nTSC, MB); Sens6: (H, W, nTSC-or-1, nCh, MB, batch);
    Msk broadcasts over the FFT of the sensitivity-weighted image.
    Returns (batch, H, W, nTSC, MB).
    """
    # Add a singleton coil axis, then move to (H, W, nTSC, /nCh/, MB, batch).
    img = np.stack([InImage], axis=-1)
    img = np.transpose(img, (1, 2, 3, 5, 4, 0))
    weighted = img * Sens6                        # H,W,nTSC,nCh,MB,batch
    # FFT over the first two (spatial) axes, mask, and transform back.
    spectrum = np.fft.fft2(weighted, axes=(0, 1))
    masked = spectrum * Msk
    back = np.fft.ifft2(masked, axes=(0, 1))
    # Coil-combine with the conjugate sensitivities.
    combined = np.sum(back * np.conj(Sens6), axis=3)   # H,W,nTSC,MB,batch
    return np.transpose(combined, (4, 0, 1, 2, 3))     # batch,H,W,nTSC,MB
def Cartesian_OPHOP_ITS_MB(InImage,Sens6,Msk):
    """TF twin of NP_Cartesian_OPHOP_ITS_MB: Cartesian normal operator A^H M A.

    Sensitivity-weight, FFT over the spatial axes, apply the sampling mask Msk,
    inverse FFT, and coil-combine with the conjugate sensitivities.
    """
    # InImage is batch_size,H,W,nTSC,MB
    # Sens6 is H,W,/nTSC/,nCh,MB,batch_size
    InImage=TF_addDim(InImage)
    InImage=tf.transpose(InImage,[1,2,3,5,4,0]) # H,W,nTSC,/nCh/,MB,batch_size
    Step1=InImage*Sens6 # H,W,nTSC,nCh,MB,batch_size
    F=TF_fft2d_on6d(Step1)
    MF=F*Msk
    IMF=TF_ifft2d_on6d(MF)
    SIMF=IMF*tf.conj(Sens6)
    Step2=tf.reduce_sum(SIMF,axis=[3]) # H,W,nTSC,MB,batch_size
    Step3=tf.transpose(Step2,[4,0,1,2,3]) # batch_size,H,W,nTSC,MB
    return Step3 # batch_size,H,W,nTSC,MB
def TS_NUFFT_OPHOP_ITS_MB(InImage,Sens6,H,W,batch_size,paddingsYMB,nTSC,nCh,fftkernc7):
    """Toeplitz-kernel normal operator (multiband variant).

    Replaces the NUFFT round trip with multiplication by the precomputed
    frequency-domain kernel fftkernc7 on a zero-padded (2H, 2W) grid, then
    crops and coil-combines. Reduction runs over coil and MB axes (3, 4).
    """
    # InImage is batch_size,H,W,nTSC,MB
    # Sens6 is H,W,/nTSC/,nCh,MB,batch_size
    # fftkernc7 is # H*2,W*2,nTSC,/nCh/,MB,/batch_size/,MBaux
    InImage=TF_addDim(InImage) # batch_size,H,W,nTSC,MB,/nCh/
    InImage=tf.transpose(InImage,[1,2,3,5,4,0]) # H,W,nTSC,/nCh/,MB,batch_size
    Step1=InImage*Sens6 # H,W,nTSC,nCh,MB,batch_size
    Padded=tf.pad(Step1, paddingsYMB, "CONSTANT") # H*2,W*2,nTSC,nCh,MB,batch_size
    Step2=TF_fft2d_on6d(Padded) # H*2,W*2,nTSC,nCh,MB,batch_size
    Step2=TF_addDim(Step2) # H*2,W*2,nTSC,nCh,MB,batch_size,/MBaux/
    Step2=Step2*fftkernc7 # H*2,W*2,nTSC,nCh,MB,batch_size,MBaux
    Step2=TF_ifft2d_on7d(Step2) # H*2,W*2,nTSC,nCh,MB,batch_size,MBaux
    # Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,-1,-1,-1])
    Cropped=Step2[:H,:W,:,:,:,:,:] # H,W,nTSC,nCh,MB,batch_size,MBaux
    Step3a=Cropped*tf.conj(TF_addDim(Sens6))
    Step3=tf.reduce_sum(Step3a,axis=[3,4]) # H,W,nTSC,batch_size,MBaux
    Step3=tf.transpose(Step3,[3,0,1,2,4]) # batch_size,H,W,nTSC,MB?aux?
    return Step3 # batch_size,H,W,nTSC,MB?aux?
def TS_NUFFT_OPHOP_ITS(InImage,Sens5,H,W,batch_size,paddingsY,nTSC,nCh,fftkernc5):
    """Toeplitz-kernel normal operator, image-per-TSC variant (no multiband).

    Same pipeline as TS_NUFFT_OPHOP_ITS_MB on 5-D tensors: pad to (2H, 2W),
    FFT, multiply by the precomputed kernel fftkernc5, inverse FFT, crop,
    and coil-combine (sum over axis 3).
    """
    # InImage is batch_size,H,W,nTSC
    # Sens5 is H,W,1,nCh,batch_size
    # fftkernc5D is H*2,W*2,nTSC,1,1
    InImage=TF_addDim(InImage) # batch_size,H,W,nTSC,1
    InImage=tf.transpose(InImage,[1,2,3,4,0]) # H,W,nTSC,1,batch_size
    Step1=InImage*Sens5 # H,W,nTSC,nCh,batch_size
    Padded=tf.pad(Step1, paddingsY, "CONSTANT") # H*2,W*2,nTSC,nCh,batch_size
    Step2=TF_fft2d_on5d(Padded)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=Step2*fftkernc5
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=TF_ifft2d_on5d(Step2)
    Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,-1,-1,-1])
    Step3a=Cropped*tf.conj(Sens5)
    Step3=tf.reduce_sum(Step3a,axis=[3]) # H,W,nTSC,batch_size
    Step3=tf.transpose(Step3,[3,0,1,2]) # batch_size,H,W,nTSC
    return Step3 # batch_size,H,W,nTSC
def TS_NUFFT_OPHOP(InImage,TSCSens,H,W,batch_size,paddingsY,nTSC,nCh,fftkernc5D,SumOver=True):
    """Toeplitz-kernel normal operator for a single image per batch element.

    InImage (batch, H, W) is broadcast against TSCSens (combined TSC x coil
    sensitivities), pushed through the padded-FFT kernel multiply, cropped and
    conjugate-weighted. With SumOver=True the TSC and coil axes are reduced and
    a (batch, H, W) image is returned; otherwise the unreduced 5-D tensor.
    """
    InImage=TF_3d_to_5d(InImage)
    InImage=tf.transpose(InImage,[1,2,3,4,0])
    Step1=tf.multiply(InImage,TSCSens)
    Padded=tf.pad(Step1, paddingsY, "CONSTANT")
    Step2=TF_fft2d_on5d(Padded)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=tf.multiply(Step2,fftkernc5D)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=TF_ifft2d_on5d(Step2)
    Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,nTSC,nCh,batch_size])
    Step3a=tf.multiply(Cropped,tf.conj(TSCSens))
    if SumOver:
        Step3=tf.reduce_sum(Step3a,axis=[2,3])
        Step3=tf.transpose(Step3,[2,0,1])
        return Step3
    else:
        return Step3a
def TS_NUFFT_OP(InImage,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc):
    """Forward time-segmented NUFFT: image batch -> k-space signal.

    Scales by the NUFFT scaling factors SNc and the TSC/coil maps, zero-pads,
    takes the 2-D FFT, interpolates onto the trajectory via the complex sparse
    matrix sp_C, and collapses the time segments with the basis TSBFXc.
    Returns the signal as (batch, nTraj, nCh).
    """
    InImage=tf.stack([tf.stack([InImage],axis=3)],axis=4)
    InImage=tf.transpose(InImage,[1,2,3,4,0])
    Step1=tf.multiply(InImage,SNc)
    Step1=tf.multiply(Step1,TSCSens)
    # Fold (nTSC, nCh, batch) into one axis so a single sparse matmul covers all.
    Step1=tf.reshape(Step1,[H,W,nTSC*nCh*batch_size])
    Padded=tf.pad(Step1, paddingsX, "CONSTANT")
    Step2a=TF_fft2d_on3d(Padded)
    Step2=tf.transpose(Step2a,[1,0,2])
    Col=tf.reshape(Step2,[-1,nTSC*nCh*batch_size])
    C=tf.sparse_tensor_dense_matmul(sp_C,Col)
    CX=tf.reshape(C,[nTraj,nTSC,nCh,batch_size])
    WithTSB=CX*TSBFXc
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    Sig=tf.transpose(WithTSBR,[2,0,1])
    return Sig
def TS_NUFFT_OP_H(Sig,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc,SumOver=True):
    """Adjoint of TS_NUFFT_OP: k-space signal batch -> image domain.

    Applies conj(TSBFXc), the conjugate-transposed sparse interpolator, an
    inverse FFT (rescaled by the 2H*2W grid size), crops to (H, W) and weights
    by the conjugate TSC/coil maps and conj(SNc). With SumOver=True the TSC and
    coil axes are reduced into a (batch, H, W) image.
    """
    SigP=tf.transpose(tf.stack([Sig],axis=3),[1,3,2,0])
    SWithTSB=tf.multiply(tf.conj(TSBFXc),SigP)
    SWithTSB=tf.reshape(SWithTSB,[nTraj,nTSC*nCh*batch_size])
    # P^H x computed as conj(P^T conj(x)) because adjoint_a only transposes.
    C=tf.conj(tf.sparse_tensor_dense_matmul(sp_C,tf.conj(SWithTSB),adjoint_a=True))
    # C=tf.sparse_tensor_dense_matmul(sp_C,SWithTSB,adjoint_a=True)
    PaddedH=tf.reshape(C,[H*2,W*2,nTSC*nCh*batch_size])
    PaddedH=tf.transpose(PaddedH,[1,0,2])
    Step2=TF_ifft2d_on3d(PaddedH)*H*W*2*2
    Cropped=tf.slice(Step2,[0,0,0],[H,W,nTSC*nCh*batch_size])
    Cropped=tf.reshape(Cropped,[H,W,nTSC,nCh,batch_size])
    Step1=tf.multiply(Cropped,tf.conj(TSCSens))
    Step1=tf.multiply(Step1,tf.conj(SNc))
    if SumOver:
        yNew=tf.reduce_sum(Step1,axis=[2,3])
        yNew=tf.transpose(yNew,[2,0,1])
        return yNew
    else:
        return Step1
# def TS_NUFFT_OP_H(Sig,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc):
# SigP=tf.transpose(tf.stack([Sig],axis=3),[1,3,2,0])
# SWithTSB=tf.multiply(tf.conj(TSBFXc),SigP)
# SWithTSB=tf.reshape(SWithTSB,[nTraj,nTSC*nCh*batch_size])
# C=tf.conj(tf.sparse_tensor_dense_matmul(sp_C,tf.conj(SWithTSB),adjoint_a=True))
# # C=tf.sparse_tensor_dense_matmul(sp_C,SWithTSB,adjoint_a=True)
# PaddedH=tf.reshape(C,[H*2,W*2,nTSC*nCh*batch_size])
# Step2=tf.transpose(tf.ifft(tf.transpose(tf.ifft(tf.transpose(PaddedH,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])*np.sqrt(2*2*H*W)
# Cropped=tf.slice(Step2,[0,0,0],[H,W,nTSC*nCh*batch_size])
# Cropped=tf.reshape(Cropped,[H,W,nTSC,nCh,batch_size])
# Step1=tf.multiply(Cropped,tf.conj(TSCSens))
# Step1=tf.multiply(Step1,tf.conj(SNc))
# yNew=tf.reduce_sum(Step1,axis=[2,3])
# yNew=tf.transpose(yNew,[2,0,1])
# return yNew
# def TS_NUFFT_OP(InImage,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc):
# InImage=tf.stack([tf.stack([InImage],axis=3)],axis=4)
# InImage=tf.transpose(InImage,[1,2,3,4,0])
# Step1=tf.multiply(InImage,SNc)
# Step1=tf.multiply(Step1,TSCSens)
# Step1=tf.reshape(Step1,[H,W,nTSC*nCh*batch_size])
# Padded=tf.pad(Step1, paddingsX, "CONSTANT")
# Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])/np.sqrt(2*2*H*W)
# Col=tf.reshape(Step2,[-1,nTSC*nCh*batch_size])
# C=tf.sparse_tensor_dense_matmul(sp_C,Col)
# CX=tf.reshape(C,[nTraj,nTSC,nCh,batch_size])
# WithTSB=CX*TSBFXc
# WithTSBR=tf.reduce_sum(WithTSB,axis=1)
# Sig=tf.transpose(WithTSBR,[2,0,1])
# return Sig
def TF_TSNUFFT_Run_TSCin(InImage,TSCin,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX):
    """Single-image forward TS-NUFFT with an explicit TSC map (TSCin).

    Like TF_TSNUFFT_Run, but the image is multiplied by TSCin first. The
    complex sparse interpolation is emulated with two real SparseTensors
    (sp_R, sp_I) via (R + iI)(r + ic) = (Rr - Ic) + i(Rc + Ir).
    """
    # SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    InImage=InImage*TSCin
    # InImage=tf.reshape(InImage,[InImage.shape[0],InImage.shape[1],1])
    Step1=tf.multiply(InImage,SNc)
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    # Separable 2-D FFT built from two 1-D tf.fft passes with transposes.
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col=tf.reshape(Step2,[-1,nTSC*nCh])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    # pdb.set_trace()
    # CX=np.reshape(C,(nTraj,nTSC,nCh))
    CX=tf.reshape(C,[nTraj,nTSC,nCh])
    # Collapse the time segments with the temporal basis TSBFX.
    WithTSB=CX*TSBFX
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    return WithTSBR
def TF_TSNUFFT_Run(InImage,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX):
    """Single-image forward TS-NUFFT (TSC maps pre-baked into SNc by Prepare).

    Pads, 2-D FFTs via two 1-D tf.fft passes, applies the complex sparse
    interpolator through its real/imag SparseTensor halves, and collapses
    the time segments with TSBFX. Returns an (nTraj, nCh) signal.
    """
    # SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    InImage=tf.reshape(InImage,[InImage.shape[0],InImage.shape[1],1])
    Step1=tf.multiply(InImage,SNc)
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col=tf.reshape(Step2,[-1,nTSC*nCh])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    # Complex matmul from real parts: (R + iI)(r + ic) = (Rr - Ic) + i(Rc + Ir).
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    # pdb.set_trace()
    # CX=np.reshape(C,(nTraj,nTSC,nCh))
    CX=tf.reshape(C,[nTraj,nTSC,nCh])
    WithTSB=CX*TSBFX
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    return WithTSBR
def TF_TSNUFFT_Run3(H,W,InImage,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX):
    """Forward TS-NUFFT variant taking explicit H, W and a multi-map input.

    Same pipeline as TF_TSNUFFT_Run, but InImage already carries the map
    dimension(s) and is reshaped to (H, W, nCh*nTSC) after SNc weighting.
    """
    # SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    # InImage=tf.reshape(InImage,[InImage.shape[0],InImage.shape[1],1])
    Step1=tf.multiply(InImage,SNc)
    Step1=tf.reshape(Step1,[H,W,nCh*nTSC])
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col=tf.reshape(Step2,[-1,nTSC*nCh])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    # Complex sparse matmul via real/imag halves (see TF_TSNUFFT_Run).
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    # pdb.set_trace()
    # CX=np.reshape(C,(nTraj,nTSC,nCh))
    CX=tf.reshape(C,[nTraj,nTSC,nCh])
    WithTSB=CX*TSBFX
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    return WithTSBR
def TF_TSNUFFT_Prepare3(SN,Sens,TSBF,Kd,P):
    """Precompute the constant tensors for the TS-NUFFT (no TSC baked in).

    Builds SNc = SN * coil maps, the temporal basis TSBFX, the zero-padding
    spec up to the oversampled grid Kd, and real/imag SparseTensors from the
    Fessler interpolation matrix P. Returns (SNc, paddings, sp_R, sp_I, TSBFX).
    """
    nTraj=TSBF.shape[1]
    nTSC=TSBF.shape[0]
    InputIShape=Sens.shape[0:2]
    nCh=Sens.shape[2]
    # TSCX=np.reshape(TSC,np.concatenate((TSC.shape,[1]),axis=0))
    # Reorder Sens to (H, W, 1, nCh) so the TSC axis can broadcast at run time.
    SensP=np.transpose(np.reshape(Sens,np.concatenate((Sens.shape,[1]),axis=0)),(0,1,3,2))
    # SensWithTSC=SensP*TSCX
    # SensWithTSCX=np.reshape(SensWithTSC,(InputIShape[0],InputIShape[1],nCh*nTSC))
    # SNX=np.reshape(SN,np.concatenate((SN.shape,[1]),axis=0))
    SNX=NP_addDim(NP_addDim(SN))
    SensWithSN=SensP*SNX
    # SensWithTSCXWithSN=SensWithTSCX*SNX
    # SNc=tf.constant(tf.cast(SensWithTSCXWithSN,tf.complex64))
    # SNc=tf.constant(np.complex64(SensWithTSCXWithSN))
    SNc=tf.constant(np.complex64(SensWithSN))
    TSBFX=np.transpose(np.reshape(TSBF,(nTSC,1,nTraj)),axes=(2,0,1))
    TSBFX=tf.constant(np.complex64(TSBFX))
    ToPad=[Kd[0,0]-InputIShape[0],Kd[0,1]-InputIShape[1]]
    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
    # paddings = tf.constant([[0, 68], [0, 60]])
    # Split the complex sparse matrix P into real/imag SparseTensors.
    Idx=scipy.sparse.find(P)
    I2=np.vstack([Idx[0],Idx[1]]).T
    I2=tf.constant(np.int64(I2))
    ValR=tf.constant(np.float32(np.real(Idx[2])))
    ValI=tf.constant(np.float32(np.imag(Idx[2])))
    sp_R = tf.SparseTensor(I2, ValR, [P.shape[0],P.shape[1]])
    sp_I = tf.SparseTensor(I2, ValI, [P.shape[0],P.shape[1]])
    # sp_R = tf.SparseTensor(I2, tf.cast(np.real(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    # sp_I = tf.SparseTensor(I2, tf.cast(np.imag(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    return SNc,paddings,sp_R,sp_I,TSBFX
def TF_TSNUFFT_Prepare2(SN,Sens,TSC,TSBF,Kd,P):
    """Precompute TS-NUFFT constants with the TSC maps folded into SNc.

    Unlike Prepare3, the TSC stack is a TF tensor here, so SNc is a computed
    tensor (not a constant). Also returns sp_C, a complex SparseTensor for
    the interpolator, alongside the real/imag pair.
    Returns (SNc, paddings, sp_R, sp_I, TSBFX, sp_C).
    """
    nTraj=TSBF.shape[1]
    nTSC=TSBF.shape[0]
    InputIShape=Sens.shape[0:2]
    nCh=Sens.shape[2]
    # TSCX=np.reshape(TSC,np.concatenate((TSC.shape,[1]),axis=0))
    TSCX=tf.stack([TSC],axis=3)
    SensP=np.transpose(np.reshape(Sens,np.concatenate((Sens.shape,[1]),axis=0)),(0,1,3,2))
    SensPT=tf.constant(np.complex64(SensP))
    SensWithTSC=tf.multiply(SensPT,TSCX)
    SensWithTSCX=tf.reshape(SensWithTSC,[SN.shape[0],SN.shape[1],-1])
    # SensWithTSCX=np.reshape(SensWithTSC,(InputIShape[0],InputIShape[1],nCh*nTSC))
    SNX=np.reshape(SN,np.concatenate((SN.shape,[1]),axis=0))
    SNXT=tf.constant(np.complex64(SNX))
    SensWithTSCXWithSN=SensWithTSCX*SNXT
    #print('SensPT')
    #print(SensPT.shape)
    #print('TSCX')
    #print(TSCX.shape)
    #print('SensWithTSC')
    #print(SensWithTSC.shape)
    #print('SensWithTSCXWithSN')
    #print(SensWithTSCXWithSN.shape)
    # SNc=tf.constant(tf.cast(SensWithTSCXWithSN,tf.complex64))
    # SNc=tf.constant(np.complex64(SensWithTSCXWithSN))
    # SNc=tf.constant(SensWithTSCXWithSN)
    SNc=SensWithTSCXWithSN
    TSBFX=np.transpose(np.reshape(TSBF,(nTSC,1,nTraj)),axes=(2,0,1))
    TSBFX=tf.constant(np.complex64(TSBFX))
    ToPad=[Kd[0,0]-InputIShape[0],Kd[0,1]-InputIShape[1]]
    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
    # paddings = tf.constant([[0, 68], [0, 60]])
    # Sparse interpolator in three flavors: real, imaginary, and full complex.
    Idx=scipy.sparse.find(P)
    I2=np.vstack([Idx[0],Idx[1]]).T
    I2=tf.constant(np.int64(I2))
    ValR=tf.constant(np.float32(np.real(Idx[2])))
    ValI=tf.constant(np.float32(np.imag(Idx[2])))
    ValC=tf.constant(np.complex64(Idx[2]))
    sp_R = tf.SparseTensor(I2, ValR, [P.shape[0],P.shape[1]])
    sp_I = tf.SparseTensor(I2, ValI, [P.shape[0],P.shape[1]])
    sp_C = tf.SparseTensor(I2, ValC, [P.shape[0],P.shape[1]])
    # sp_R = tf.SparseTensor(I2, tf.cast(np.real(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    # sp_I = tf.SparseTensor(I2, tf.cast(np.imag(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    return SNc,paddings,sp_R,sp_I,TSBFX,sp_C
def TF_TSNUFFT_Prepare(SN,Sens,TSC,TSBF,Kd,P):
    """Prepare TF constants/sparse tensors for a time-segmented NUFFT
    (numpy-precomputed variant of TF_TSNUFFT_Prepare2).

    Returns (SNc, paddings, sp_R, sp_I, TSBFX); here SNc is a fixed
    tf.constant built entirely in numpy.
    """
    nTraj=TSBF.shape[1]
    nTSC=TSC.shape[2]
    InputIShape=Sens.shape[0:2]
    nCh=Sens.shape[2]
    # TSC gets a trailing singleton axis; Sens is rearranged to (H, W, 1, nCh)
    # so the elementwise product broadcasts over segments and channels.
    TSCX=np.reshape(TSC,np.concatenate((TSC.shape,[1]),axis=0))
    SensP=np.transpose(np.reshape(Sens,np.concatenate((Sens.shape,[1]),axis=0)),(0,1,3,2))
    SensWithTSC=SensP*TSCX
    # Collapse (segment, channel) into one combined trailing axis.
    SensWithTSCX=np.reshape(SensWithTSC,(InputIShape[0],InputIShape[1],nCh*nTSC))
    # Fold in the NUFFT scaling factors.
    SNX=np.reshape(SN,np.concatenate((SN.shape,[1]),axis=0))
    SensWithTSCXWithSN=SensWithTSCX*SNX
    # SNc=tf.constant(tf.cast(SensWithTSCXWithSN,tf.complex64))
    SNc=tf.constant(np.complex64(SensWithTSCXWithSN))
    # Basis functions rearranged to (nTraj, nTSC, 1).
    TSBFX=np.transpose(np.reshape(TSBF,(nTSC,1,nTraj)),axes=(2,0,1))
    TSBFX=tf.constant(np.complex64(TSBFX))
    # Zero-padding amounts up to the oversampled grid (trailing axis unpadded).
    ToPad=[Kd[0,0]-InputIShape[0],Kd[0,1]-InputIShape[1]]
    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
    # paddings = tf.constant([[0, 68], [0, 60]])
    # Split the complex sparse interpolator into real and imaginary parts.
    Idx=scipy.sparse.find(P)
    I2=np.vstack([Idx[0],Idx[1]]).T
    I2=tf.constant(np.int64(I2))
    ValR=tf.constant(np.float32(np.real(Idx[2])))
    ValI=tf.constant(np.float32(np.imag(Idx[2])))
    sp_R = tf.SparseTensor(I2, ValR, [P.shape[0],P.shape[1]])
    sp_I = tf.SparseTensor(I2, ValI, [P.shape[0],P.shape[1]])
    # sp_R = tf.SparseTensor(I2, tf.cast(np.real(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    # sp_I = tf.SparseTensor(I2, tf.cast(np.imag(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    return SNc,paddings,sp_R,sp_I,TSBFX
def TF_NUFT(A,SN,Kd,P):
    """Non-uniform Fourier transform of an image stack via scaling,
    oversampled FFT, and sparse interpolation onto the trajectory.

    A  : image data, e.g. of size H,W,nMaps
    SN : scaling factors from Fessler's NUFFT, .* channel maps; finally H,W,nMaps
    Kd : final (oversampled) grid size for the over-FT, e.g. [[H*2, W*2]]
    P  : sparse complex matrix of nTraj x H*W (interpolator)

    Returns a complex tensor of shape (nTraj, nMaps).
    """
    # Zero-pad each 2-D slice up to the oversampled grid size Kd.
    ToPad=[Kd[0,0]-A.shape[0],Kd[0,1]-A.shape[1]]
    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
    # Number of maps taken from the data itself (was hard-coded to 2).
    nMaps=A.shape[2]
    # Split the complex interpolator into real/imag sparse parts, using P's
    # own shape (was hard-coded to [101, 16320] from the test data).
    Idx=scipy.sparse.find(P)
    I2=np.vstack([Idx[0],Idx[1]]).T
    sp_R = tf.SparseTensor(I2, tf.cast(np.real(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    sp_I = tf.SparseTensor(I2, tf.cast(np.imag(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
    SNx=tf.constant(tf.cast(SN,tf.complex64))
    Ax=tf.constant(tf.cast(A,tf.complex64))
    SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    # Step 1: apply the scaling factors (and channel maps) to the image.
    Step1=tf.multiply(Ax,SNx)
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    # Step 2: 2-D FFT over the padded grid, per map, via axis transposes
    # (tf.fft operates on the innermost axis only).
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Flatten the grid and interpolate onto the trajectory; complex
    # multiplication is expanded into four real sparse matmuls.
    Col=tf.reshape(Step2,[-1,nMaps])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    return C
def GenerateNeighborsMapBaseExt(Traj,kMax,osN,nNeighbors):
    """For each point of a fixed integer grid, record the indices of the
    nNeighbors nearest trajectory samples plus their (dx, dy) offsets.

    Note: the grid is hard-coded to np.arange(-63, 65); kMax is unused here.
    Returns (NMap, DMap) of shapes (osN, osN, nNeighbors) and
    (osN, osN, nNeighbors, 2).
    """
    NMap = np.zeros([osN, osN, nNeighbors], dtype='int32')
    DMap = np.zeros([osN, osN, nNeighbors, 2], dtype='float32')
    grid = np.arange(-63, 65)
    for row in np.arange(0, osN):
        for col in np.arange(0, osN):
            center = np.vstack([grid[row], grid[col]])
            offsets = Traj - center
            dx = np.squeeze(offsets[0, :])
            dy = np.squeeze(offsets[1, :])
            dist = np.linalg.norm(offsets, ord=2, axis=0) / np.sqrt(2)
            nearest = np.argsort(dist)[0:nNeighbors]
            NMap[row, col, :] = nearest
            DMap[row, col, :, 0] = dx[nearest]
            DMap[row, col, :, 1] = dy[nearest]
    return NMap, DMap
def GenerateNeighborsMapBase(Traj,kMax,osN,nNeighbors):
    """For each point of a fixed integer grid, record the indices of the
    nNeighbors nearest trajectory samples.

    Note: the grid is hard-coded to np.arange(-63, 65); kMax is unused here.
    """
    NMap = np.zeros([osN, osN, nNeighbors], dtype='int32')
    # C=linspaceWithHalfStep(-kMax,kMax,osN)
    grid = np.arange(-63, 65)
    for row in np.arange(0, osN):
        for col in np.arange(0, osN):
            center = np.vstack([grid[row], grid[col]])
            dist = np.linalg.norm(Traj - center, ord=2, axis=0) / np.sqrt(2)
            NMap[row, col, :] = np.argsort(dist)[0:nNeighbors]
    return NMap
def GenerateNeighborsMap(Traj,kMax,osN,ncc,nChToUseInNN,nNeighbors):
    """Build per-grid-point neighbour indices into a flattened multi-channel
    sample vector, duplicated with an offset of nTrajAct*ncc for the second
    (e.g. imaginary) half.

    Relies on linspaceWithHalfStep() defined elsewhere in this module.
    """
    nTrajAct = Traj.shape[1]
    NMap = np.zeros([osN, osN, nNeighbors], dtype='int32')
    grid = linspaceWithHalfStep(-kMax, kMax, osN)
    for row in np.arange(0, osN):
        for col in np.arange(0, osN):
            center = np.vstack([grid[row], grid[col]])
            dist = np.linalg.norm(Traj - center, ord=2, axis=0) / np.sqrt(2)
            NMap[row, col, :] = np.argsort(dist)[0:nNeighbors]
    # Offset the neighbour indices per channel so they index a flattened
    # channel-major sample vector.
    channel_offsets = np.reshape(np.arange(0, nChToUseInNN) * nTrajAct, (1, 1, 1, nChToUseInNN))
    NMapC = np.reshape(NMap, (NMap.shape[0], NMap.shape[1], NMap.shape[2], 1)) + channel_offsets
    NMapC = np.transpose(NMapC, (0, 1, 2, 3))
    NMapCX = np.reshape(NMapC, (osN, osN, nNeighbors * nChToUseInNN))
    # Second copy shifted past the first block by nTrajAct*ncc.
    return np.concatenate((NMapCX, NMapCX + nTrajAct * ncc), axis=2)
# T=scipy.io.loadmat('/media/a/H1/NMapTest.mat')
# Traj=T['Traj'][0:2,:]
# NMapRef=T['NMap']-1
# NMapCRef=T['NMapC']-1
# NMapCXRef=T['NMapCX']-1
# NMapCRRef=T['NMapCR']
# Out=np.amax(np.abs(NMap-NMapRef))
# OutC=np.amax(np.abs(NMapC-NMapCRef))
# OutCX=np.amax(np.abs(NMapCX-NMapCXRef))
# OutCR=np.amax(np.abs(NMapCR-NMapCRRef))
# [Out, OutC,OutCX,OutCR]
# Result: [0, 0, 0, 0]
def GenerateNeighborsMapC(Traj,kMax,osN,ncc,nChToUseInNN,nNeighbors):
    """Like GenerateNeighborsMap but without the second (offset) copy:
    returns per-channel neighbour indices only.

    Relies on linspaceWithHalfStep() defined elsewhere in this module.
    """
    nTrajAct = Traj.shape[1]
    NMap = np.zeros([osN, osN, nNeighbors], dtype='int32')
    grid = linspaceWithHalfStep(-kMax, kMax, osN)
    for row in np.arange(0, osN):
        for col in np.arange(0, osN):
            center = np.vstack([grid[row], grid[col]])
            dist = np.linalg.norm(Traj - center, ord=2, axis=0) / np.sqrt(2)
            NMap[row, col, :] = np.argsort(dist)[0:nNeighbors]
    # Per-channel offsets into the flattened channel-major sample vector.
    channel_offsets = np.reshape(np.arange(0, nChToUseInNN) * nTrajAct, (1, 1, 1, nChToUseInNN))
    NMapC = np.reshape(NMap, (NMap.shape[0], NMap.shape[1], NMap.shape[2], 1)) + channel_offsets
    NMapC = np.transpose(NMapC, (0, 1, 2, 3))
    # NMapCR=np.concatenate((NMapCX,NMapCX+nTrajAct*ncc),axis=2)
    return np.reshape(NMapC, (osN, osN, nNeighbors * nChToUseInNN))
def MoveWithCopiedBackwards(N,L):
    """Index vector of length N that shifts backwards by L, reflecting the
    tail (N-2, N-3, ...) instead of wrapping."""
    forward_part = tf.range(L, N)
    reflected_tail = tf.range(N - 2, N - 2 - L, -1)
    return tf.concat([forward_part, reflected_tail], axis=0)
def MoveWithCopiedForwards(N,L):
    """Index vector of length N that shifts forwards by L, reflecting the
    head (L, L-1, ...) instead of wrapping."""
    reflected_head = tf.range(L, 0, -1)
    forward_part = tf.range(0, N - L)
    return tf.concat([reflected_head, forward_part], axis=0)
def ExpandWithBackwardsOn2(A,N,K):
    """Concatenate A with K-1 backward-shifted (reflected) copies of itself
    along axis 2; axis 0 is the shifted axis of length N."""
    expanded = A
    for shift in range(1, K):
        shifted = tf.gather(A, MoveWithCopiedBackwards(N, shift), axis=0)
        expanded = tf.concat([expanded, shifted], axis=2)
    return expanded
def ExpandWithForwardsOn2(A,N,K):
    """Concatenate A with K-1 forward-shifted (reflected) copies of itself
    along axis 2; axis 0 is the shifted axis of length N."""
    expanded = A
    for shift in range(1, K):
        shifted = tf.gather(A, MoveWithCopiedForwards(N, shift), axis=0)
        expanded = tf.concat([expanded, shifted], axis=2)
    return expanded
def ExpandWithCopiesOn2(A,N,K):
    """Stack backward-shifted copies, A itself, and forward-shifted copies
    along axis 2."""
    backwards = ExpandWithBackwardsOn2(A, N, K)
    forwards = ExpandWithForwardsOn2(A, N, K)
    return tf.concat([backwards, A, forwards], axis=2)
def gifft_TFOn3D(x,H,dim=0):
    """Centered inverse FFT of a 3-D complex tensor along axis `dim` (0 or 1),
    scaled by sqrt(H) so it pairs with gfft_TFOn3D.

    The target axis is transposed to the innermost position (tf.ifft works on
    the last axis only), fftshift is emulated with a gather on a precomputed
    half-swap index vector, and the shifts/transposes are undone afterwards.
    """
    HalfH=H/2  # NOTE(review): float under Python 3; assumes H is even — confirm
    # Index vector that swaps the two halves (fftshift equivalent).
    Id=np.hstack([np.arange(HalfH,H), np.arange(0,HalfH)])
    Id=Id.astype(int)
    if dim==0 :
        x = tf.transpose(x, perm=[2,1,0])
    if dim==1 :
        x = tf.transpose(x, perm=[0,2,1])
    x = tf.gather(x,Id,axis=2)
    out=tf.ifft(x)
    # Rescale tf.ifft's 1/H normalization to 1/sqrt(H) (orthonormal-style).
    out=tf.multiply(out,tf.sqrt(tf.cast(H,tf.complex64)))
    out = tf.gather(out,Id,axis=2)
    if dim==0 :
        out = tf.transpose(out, perm=[2,1, 0])
    if dim==1 :
        out = tf.transpose(out, perm=[0,2,1])
    return out
def gfft_TFOn3D(x,H,dim=0):
    """Centered forward FFT of a 3-D complex tensor along axis `dim` (0 or 1),
    scaled by 1/sqrt(H); inverse of gifft_TFOn3D.

    Same transpose/gather strategy as gifft_TFOn3D: tf.fft works on the last
    axis only, and the half-swap gather emulates fftshift.
    """
    HalfH=H/2  # NOTE(review): float under Python 3; assumes H is even — confirm
    # Index vector that swaps the two halves (fftshift equivalent).
    Id=np.hstack([np.arange(HalfH,H), np.arange(0,HalfH)])
    Id=Id.astype(int)
    if dim==0 :
        x = tf.transpose(x, perm=[2,1,0])
    if dim==1 :
        x = tf.transpose(x, perm=[0,2,1])
    x = tf.gather(x,Id,axis=2)
    out=tf.fft(x)
    # Scale the unnormalized tf.fft down to 1/sqrt(H) (orthonormal-style).
    out=tf.divide(out,tf.sqrt(tf.cast(H,tf.complex64)))
    out = tf.gather(out,Id,axis=2)
    if dim==0 :
        out = tf.transpose(out, perm=[2,1, 0])
    if dim==1 :
        out = tf.transpose(out, perm=[0,2,1])
    return out
def gfft_TF(x,H,dim=0):
    """Centered forward FFT of a 2-D complex tensor along axis `dim` (0 or 1),
    scaled by 1/sqrt(H); 2-D counterpart of gfft_TFOn3D."""
    HalfH=H/2  # NOTE(review): float under Python 3; assumes H is even — confirm
    # Index vector that swaps the two halves (fftshift equivalent).
    Id=np.hstack([np.arange(HalfH,H), np.arange(0,HalfH)])
    Id=Id.astype(int)
    # IQ2=tf.reshape(IQ,IQ.shape[0:2])
    if dim==1 :
        x = tf.transpose(x, perm=[1, 0])
    x = tf.gather(x,Id,axis=1)
    out=tf.fft(x)
    out=tf.divide(out,tf.sqrt(tf.cast(H,tf.complex64)))
    out = tf.gather(out,Id,axis=1)
    if dim==1 :
        out = tf.transpose(out, perm=[1,0])
    return out
def gfft(x,dim=0):
    """Centered forward FFT along axis `dim`, scaled by 1/sqrt(N)
    (orthonormal-style); numpy counterpart of gfft_TF."""
    shifted = np.fft.ifftshift(x, axes=dim)
    spectrum = np.fft.fft(shifted, axis=dim)
    return np.fft.fftshift(spectrum, axes=dim) / np.sqrt(x.shape[dim])
def gifft(x,dim=0):
    """Centered inverse FFT along axis `dim`, scaled by sqrt(N); inverse of gfft."""
    shifted = np.fft.ifftshift(x, axes=dim)
    signal = np.fft.ifft(shifted, axis=dim)
    return np.fft.fftshift(signal, axes=dim) * np.sqrt(x.shape[dim])
def IDFT_matrix(N):
HalfN=N/2
Id=np.hstack([np.arange(HalfN,N), | np.arange(0,HalfN) | numpy.arange |
import shutil
import numpy as np
import math
import pandas as pd
from urllib.request import urlopen
import cv2
from skimage import exposure
import shapely
import glob
import os
from osgeo import gdal
import utm
import itertools
import geopandas as gpd
import pathlib
import matplotlib.pyplot as plt
import matplotlib._color_data as mcd
import contextily as ctx
import time
cycle = list(mcd.XKCD_COLORS.values())
import hsfm
import bare
"""
Core data wrangling and preprocessing functions.
"""
# TODO
# - break this up into separate libraries and classes to better
#   accommodate other imagery and generalize upstream as much as possible.
def get_gcp_polygon(fn):
    """Read a .gcp file (space-separated, lat in column 1, lon in column 2)
    and return a polygon GeoDataFrame tagged with the camera name taken from
    the file name."""
    camera_name = os.path.splitext(os.path.split(fn)[-1])[0]
    coords = pd.read_csv(fn, header=None, sep=' ')[[1, 2]]
    coords.columns = ['lat', 'lon']
    polygon_gdf = hsfm.geospatial.df_points_to_polygon_gdf(coords)
    polygon_gdf['camera'] = camera_name
    return polygon_gdf
def create_overlap_list(gcp_directory,
                        image_directory,
                        output_directory):
    """Write ba/overlaplist.txt pairing images whose GCP footprints overlap.

    Returns the path of the written list file.
    """
    ba_directory = os.path.join(output_directory, 'ba')
    hsfm.io.create_dir(ba_directory)
    filename_out = os.path.join(ba_directory, 'overlaplist.txt')
    # Start from a clean file; pairs are appended below.
    if os.path.exists(filename_out):
        os.remove(filename_out)
    gcp_files = glob.glob(os.path.join(gcp_directory, '*.gcp'))
    image_files = glob.glob(os.path.join(image_directory, '*.tif'))
    footprints = [get_gcp_polygon(fn) for fn in gcp_files]
    pairs = []
    for a, b in itertools.combinations(footprints, 2):
        if hsfm.geospatial.compare_footprints(a, b) == 1:
            img_a = hsfm.io.retrieve_match(a['camera'].values[0], image_files)
            img_b = hsfm.io.retrieve_match(b['camera'].values[0], image_files)
            pairs.append((img_a, img_b))
    # De-duplicate and write one pair per line.
    for img_a, img_b in sorted(set(pairs)):
        with open(filename_out, 'a') as out:
            out.write(img_a + ' ' + img_b + '\n')
    return filename_out
def create_overlap_list_from_match_files(match_files_directory,
                                         image_directory,
                                         output_directory,
                                         suffix='.match'):
    """Write ba/overlaplist.txt with image pairs parsed from ASP match file
    names found in match_files_directory.

    Returns the path of the written list file.
    """
    output_directory = os.path.join(output_directory, 'ba')
    hsfm.io.create_dir(output_directory)
    filename_out = os.path.join(output_directory,'overlaplist.txt')
    # Start from a clean file; pairs are appended below.
    if os.path.exists(filename_out):
        os.remove(filename_out)
    match_files = sorted(glob.glob(os.path.join(match_files_directory, '*' + suffix)))
    # (removed a stray no-op `match_files` expression statement here)
    pairs = []
    for match_file in match_files:
        img1_fn, img2_fn = bare.core.parse_image_names_from_match_file_name(match_file,
                                                                            image_directory,
                                                                            'tif')
        pairs.append((img1_fn, img2_fn))
    # creates full set from .match and clean.match pairs
    pairs = sorted(list(set(pairs)))
    for i in pairs:
        with open(filename_out, 'a') as out:
            out.write(i[0] + ' '+ i[1]+'\n')
    return filename_out
def determine_flight_lines(df,
                           cutoff_angle = 30,
                           file_base_name_column = 'fileName',
                           longitude_column = 'Longitude',
                           latitude_column = 'Latitude'):
    """Split a camera-metadata DataFrame into flight lines.

    A new flight line starts wherever the heading change between consecutive
    images exceeds cutoff_angle degrees.  Returns a list of DataFrames, one
    per flight line; single-image lines are merged into the previous line.
    """
    # Heading per image is computed from consecutive lat/lon positions.
    df = hsfm.batch.calculate_heading_from_metadata(df,
                                                    file_base_name_column = file_base_name_column,
                                                    longitude_column = longitude_column,
                                                    latitude_column = latitude_column)
    # Heading change to the next image; last row has no successor -> 0.
    df['next_heading'] = df['heading'].shift(-1)
    df['heading_diff'] = abs(df['next_heading'] - df['heading'])
    df['heading_diff'] = df['heading_diff'].fillna(0)
    df = df.reset_index(drop=True)
    flights_tmp = []
    tmp_df = pd.DataFrame()
    # Accumulate rows (as columns, transposed on flush) until the heading
    # change crosses the cutoff, then flush the accumulated segment.
    for row in df.iterrows():
        if row[1]['heading_diff'] < cutoff_angle:
            tmp_df = pd.concat([tmp_df, pd.DataFrame(row[1])],axis=1)
        else:
            # Large turn: this row closes the current flight line.
            tmp_df = pd.concat([tmp_df, pd.DataFrame(row[1])],axis=1)
            tmp_df = tmp_df.T
            flights_tmp.append(tmp_df)
            tmp_df = pd.DataFrame()
    # Flush any trailing partial flight line.
    tmp_df = tmp_df.T
    if not tmp_df.empty:
        flights_tmp.append(tmp_df.reset_index(drop=True))
    flights = []
    # Merge single-image "lines" into the preceding flight line.
    for i,v in enumerate(flights_tmp):
        if len(v) == 1:
            tmp_df = pd.concat([flights_tmp[i-1],v])
            flights.pop()
            flights.append(tmp_df.reset_index(drop=True))
        else:
            flights.append(v.reset_index(drop=True))
    return flights
def evaluate_image_frame(grayscale_unit8_image_array,frame_size=0.07):
    """Return which border strip ('left', 'right', 'top' or 'bottom') of a
    grayscale image has the lowest median intensity.

    frame_size is the strip width as a fraction of the image dimension.
    Ties resolve in the order left, right, top, bottom.
    """
    img = grayscale_unit8_image_array
    height = img.shape[0]
    width = img.shape[1]
    x_lo = int(width * frame_size)
    x_hi = int(width * (1 - frame_size))
    y_lo = int(height * frame_size)
    y_hi = int(height * (1 - frame_size))
    strip_medians = {
        'left': np.median(img[0:height, 0:x_lo]),
        'right': np.median(img[0:height, x_hi:width]),
        'top': np.median(img[0:y_lo, 0:width]),
        'bottom': np.median(img[y_hi:height, 0:width]),
    }
    return min(strip_medians, key=strip_medians.get)
def calculate_distance_principal_point_to_image_edge(focal_length_mm,
image_width_px,
image_height_px,
camera_lat_lon_wgs84_center_coordinates,
reference_dem,
flight_altitude_above_ground_m=1500,
pixel_pitch=None):
"""
Function to calculate distance on ground from principal point to image edge.
"""
# TODO
# - Sample elevation of reference DEM at camera center and subtract from
# NAGAP altitude metadata to get altitude above ground. Assumes NAGAP
# flights left from sea level. May not be necessary if 3000 meters (10,000 feet)
# assumption is good enough for ASP bundle_adjust to correct from.
elevation = hsfm.geospatial.sample_dem([camera_lat_lon_wgs84_center_coordinates[1],],
[camera_lat_lon_wgs84_center_coordinates[0],],
reference_dem)
altitude_above_ground_m = flight_altitude_above_ground_m - elevation[0]
c = 500
while altitude_above_ground_m < 500:
altitude_above_ground_m = flight_altitude_above_ground_m + c - elevation[0]
c = c+500
# print(elevation[0], altitude_above_ground_m)
# Divide image width in pixels by pixel pitch to get distance in millimeters.
image_width_mm = image_width_px * pixel_pitch
image_height_mm = image_height_px * pixel_pitch
# Calculate angle between principal point and image edge.
angle_pp_img_edge_x = np.degrees(np.arctan((image_width_mm/2)/focal_length_mm))
angle_pp_img_edge_y = np.degrees(np.arctan((image_height_mm/2)/focal_length_mm))
# In theory, the distance to the sensor should be added to get the true sensor altitude
# above ground. Likely does not make a difference here.
sensor_altitude_above_ground_m = focal_length_mm/1000 + altitude_above_ground_m
# Calculate x and y distances seperately in case images are not square.
# This is needed for hsfm.trig.calculate_corner()
distance_pp_img_edge_x_m = np.tan( | np.deg2rad(angle_pp_img_edge_x) | numpy.deg2rad |
#----------------------------Reproducible----------------------------------------------------------------------------------------
# Pin every RNG source (Python hashing, numpy, random, TensorFlow) and force
# single-threaded session execution so results are reproducible run-to-run.
import numpy as np
import tensorflow as tf
import random as rn
import os
seed=0
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
rn.seed(seed)
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
#tf.set_random_seed(seed)
tf.compat.v1.set_random_seed(seed)
#sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
# Make Keras use the deterministic, single-threaded session.
K.set_session(sess)
#----------------------------Reproducible----------------------------------------------------------------------------------------
# Silence TensorFlow C++ logging (show errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#--------------------------------------------------------------------------------------------------------------------------------
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input, Flatten, Activation, Dropout, Layer
from keras.layers.normalization import BatchNormalization
from keras.utils import to_categorical
from keras import optimizers,initializers,constraints,regularizers
from keras import backend as K
from keras.callbacks import LambdaCallback,ModelCheckpoint
from keras.utils import plot_model
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import h5py
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
#--------------------------------------------------------------------------------------------------------------------------------
def show_data_figures(p_data,w=20,h=20,columns = 20):
    """Display each row of p_data as a w x h grayscale image, laid out in a
    grid with the given number of columns."""
    n_samples = len(p_data)
    rows = math.ceil(n_samples / columns)
    fig = plt.figure(figsize=(columns, rows))
    for idx in range(n_samples):
        fig.add_subplot(rows, columns, idx + 1)
        plt.axis('off')
        plt.imshow(p_data[idx, :].reshape((w, h)), plt.cm.gray)
    plt.show()
#--------------------------------------------------------------------------------------------------------------------------------
def show_data_figures_with_keyfeature(p_data,p_key_feature_catch,w=20,h=20,columns = 20):
    """Display each row of p_data as a w x h grayscale image and overlay the
    selected key-feature pixel locations (row, col pairs) as red dots."""
    n_samples = len(p_data)
    rows = math.ceil(n_samples / columns)
    fig = plt.figure(figsize=(columns, rows))
    for idx in range(n_samples):
        fig.add_subplot(rows, columns, idx + 1)
        plt.axis('off')
        plt.imshow(p_data[idx, :].reshape((w, h)), plt.cm.gray)
        for key_pt in p_key_feature_catch:
            # scatter takes (x, y) = (col, row).
            plt.scatter(key_pt[1], key_pt[0], s=0.5, color='r')
    plt.tight_layout()
    plt.show()
#--------------------------------------------------------------------------------------------------------------------------------
def top_k_keep(p_arr_,p_top_k_):
    """Zero out every entry strictly smaller than the k-th largest value of
    p_arr_; entries tied with the k-th value are all kept."""
    descending = p_arr_.argsort()[::-1]
    kth_value = p_arr_[descending[0:p_top_k_]][-1]
    return np.where(p_arr_ < kth_value, 0, p_arr_)
#--------------------------------------------------------------------------------------------------------------------------------
def show_feature_selection(p_file_name,p_test_data,p_sample_number=40,p_key_number=36):
file = h5py.File(p_file_name,'r')
data = file['feature_selection']['feature_selection']['kernel:0']
weight_top_k=top_k_keep( | np.array(data) | numpy.array |
from typing import Any, Callable, Dict, List, TYPE_CHECKING, Tuple, Union, cast
import numpy as np
_operatorsIotaCounter = 0
def _operatorsIota() -> int:
global _operatorsIotaCounter
rv = _operatorsIotaCounter
_operatorsIotaCounter = _operatorsIotaCounter+1
return rv
# Enumerated AST node tags, numbered with the module-level iota counter.
TERM_ATOM=_operatorsIota()
TERM_ADD=_operatorsIota()
TERM_SUB=_operatorsIota()
TERM_MUL=_operatorsIota()
TERM_DIV=_operatorsIota()
TERM_AND=_operatorsIota()
TERM_EQUAL=_operatorsIota()
TERM_GT=_operatorsIota()
TERM_LT=_operatorsIota()
TERM_SYMBOL=_operatorsIota()
TERM_GETITEM=_operatorsIota()
TERM_QUADRATIC=_operatorsIota()
TERM_MAXIMIZE=_operatorsIota()
TERM_MINIMIZE=_operatorsIota()
TERM_MAXIMUM=_operatorsIota()
TERM_MINIMUM=_operatorsIota()
TERM_DECLARE=_operatorsIota()
TERM_LABEL=_operatorsIota()
TERM_OPT=_operatorsIota()
TERM_TARGET=_operatorsIota()
TERM_BITSIZE=_operatorsIota()
# Forward-declared constructor hooks.  Term's operator overloads call these;
# they are rebound to the concrete node classes once those classes exist
# (see the "Links" assignments later in this module).
_TermAdd_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermSub_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermMul_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermDiv_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermEqual_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermAnd_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermGt_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermLt_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermGetItem_ : Callable[[Any,Any],Any] = lambda x,y: None
_TermQuadratic_ : Callable[[Any,Any],Any] = lambda x,y: None
_Wrapper_ : Callable[[Any],Any] = lambda x: None
class Term:
    """Base class for expression-tree nodes.

    Operator overloads build new Term nodes via the forward-declared
    constructor hooks (_TermAdd_ etc.).  Note that because __eq__ is
    overloaded to build a node, standard Python semantics make instances
    unhashable and `==` does not return a boolean.
    """
    def getTag(self)->int:
        pass
    def __add__(self, x)->"Term":
        return cast("Term",_TermAdd_(self, x))
    def __sub__(self, x)->"Term":
        return cast("Term",_TermSub_(self, x))
    def __mul__(self, x)->"Term":
        return cast("Term",_TermMul_(self, x))
    def __div__(self, x)->"Term":
        # Python 2 name, kept for compatibility.
        return cast("Term",_TermDiv_(self, x))
    def __truediv__(self, x)->"Term":
        # Fix: Python 3 dispatches `/` to __truediv__, not __div__; without
        # this, `term / x` raised TypeError.
        return cast("Term",_TermDiv_(self, x))
    def __pow__(self, exponent)->"Term":
        # Only squaring is supported; modeled as a quadratic form.
        assert(exponent==2)
        return _TermQuadratic_(1, self)
    def __eq__(self, x)->"Term":
        return cast("Term",_TermEqual_(self, x))
    def __gt__(self, x)->"Term":
        return cast("Term",_TermGt_(self, x))
    def __lt__(self, x)->"Term":
        return cast("Term",_TermLt_(self, x))
    def __and__(self, x)->"Term":
        return cast("Term", _TermAnd_(self,x))
    def __getitem__(self, x)->"Term":
        return cast("Term", _TermGetItem_(self,x))
    def __radd__(self, x)->"Term":
        return cast("Term",_TermAdd_(x, self))
    def __rsub__(self, x)->"Term":
        return cast("Term",_TermSub_(x, self))
    def __rmul__(self, x)->"Term":
        return cast("Term",_TermMul_(x,self))
    def __rdiv__(self, x)->"Term":
        # Python 2 name, kept for compatibility.
        return cast("Term",_TermDiv_(x,self))
    def __rtruediv__(self, x)->"Term":
        # Fix: Python 3 counterpart of __rdiv__.
        return cast("Term",_TermDiv_(x,self))
    def __rpow__(self, exponent)->"Term":
        # NOTE(review): mirrors __pow__, so `2 ** term` also squares the
        # term — confirm this asymmetry is intended.
        assert(exponent==2)
        return _TermQuadratic_(1, self)
    def __req__(self, x)->"Term":
        return cast("Term",_TermEqual_(x,self))
    def __rgt__(self, x)->"Term":
        return cast("Term",_TermGt_(x,self))
    def __rlt__(self, x)->"Term":
        return cast("Term",_TermLt_(x,self))
    def __rand__(self, x)->"Term":
        return cast("Term", _TermAnd_(x,self))
class TermAtom(Term):
    """Leaf node wrapping a raw constant (number or ndarray)."""
    def getTag(self)->int:
        return TERM_ATOM
    def __init__(self, value):
        self.tag = self.getTag()
        self.atom = value
    def __repr__(self):
        return "TermAtom({!r})".format(self.atom)
class TermSymbol(Term):
    """Leaf node naming a symbolic variable."""
    def getTag(self)->int:
        return TERM_SYMBOL
    def __init__(self, name: str):
        self.tag = self.getTag()
        self.name = name
    def __repr__(self):
        return "TermSymbol('{}')".format(self.name)
class TermAdd(Term):
    """Binary AST node for addition; operands are coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_ADD
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermAdd({},{})".format(self.left, self.right)
class TermSub(Term):
    """Binary AST node for subtraction; operands are coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_SUB
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermSub({},{})".format(self.left, self.right)
class TermMul(Term):
    """Binary AST node for multiplication; operands are coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_MUL
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermMul({},{})".format(self.left, self.right)
class TermDiv(Term):
    """Binary AST node for division; operands are coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_DIV
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermDiv({},{})".format(self.left, self.right)
class TermEqual(Term):
    """Binary AST node for an equality constraint; operands coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_EQUAL
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermEqual({},{})".format(self.left, self.right)
class TermGt(Term):
    """Binary AST node for a greater-than constraint; operands coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_GT
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermGt({},{})".format(self.left, self.right)
class TermLt(Term):
    """Binary AST node for a less-than constraint; operands coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_LT
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermLt({},{})".format(self.left, self.right)
class TermAnd(Term):
    """Binary AST node for conjunction of constraints; operands coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_AND
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermAnd({},{})".format(self.left, self.right)
class TermGetItem(Term):
    """Binary AST node for indexing (term[key]); operands coerced via _Wrapper_."""
    def getTag(self)->int:
        return TERM_GETITEM
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermGetItem({},{})".format(self.left, self.right)
class TermDeclare(Term):
    """Binary AST node for a declaration (see the declare() helper)."""
    def getTag(self)->int:
        return TERM_DECLARE
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermDeclare({},{})".format(self.left, self.right)
class TermBitsize(Term):
    """Binary AST node attaching a bit-size annotation (see the bitsize() helper)."""
    def getTag(self)->int:
        return TERM_BITSIZE
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermBitsize({},{})".format(self.left, self.right)
class TermLabel(Term):
    """Binary AST node attaching a label to a sub-expression (see label())."""
    def getTag(self)->int:
        return TERM_LABEL
    def __init__(self, x, y):
        self.tag = self.getTag()
        self.left = cast(Term, _Wrapper_(x))
        self.right = cast(Term, _Wrapper_(y))
    def __repr__(self):
        return "TermLabel({},{})".format(self.left, self.right)
class TermQuadratic(Term):
    """Quadratic-form AST node with weight term m and operand x
    (produced by Term.__pow__ for squaring)."""
    def getTag(self)->int:
        return TERM_QUADRATIC
    def __init__(self, m, x):
        self.tag = self.getTag()
        self.m = cast(Term, _Wrapper_(m))
        self.x = cast(Term, _Wrapper_(x))
    def __repr__(self):
        return "TermQuadratic({},{})".format(self.m, self.x)
class TermMaximize(Term):
    """Maximization objective node with weight m and expression x."""
    def getTag(self)->int:
        return TERM_MAXIMIZE
    def __init__(self, m, x):
        self.tag = self.getTag()
        self.m = cast(Term, _Wrapper_(m))
        self.x = cast(Term, _Wrapper_(x))
    def __repr__(self):
        return "TermMaximize({},{})".format(self.m, self.x)
class TermMinimize(Term):
    """Minimization objective node with weight m and expression x."""
    def getTag(self)->int:
        return TERM_MINIMIZE
    def __init__(self, m, x):
        self.tag = self.getTag()
        self.m = cast(Term, _Wrapper_(m))
        self.x = cast(Term, _Wrapper_(x))
    def __repr__(self):
        return "TermMinimize({},{})".format(self.m, self.x)
class TermMaximum(Term):
    """Unary AST node for the maximum() reduction."""
    def getTag(self)->int:
        return TERM_MAXIMUM
    def __init__(self, operand):
        self.tag = self.getTag()
        self.x = cast(Term, _Wrapper_(operand))
    def __repr__(self):
        return "TermMaximum({})".format(self.x)
class TermMinimum(Term):
    """Unary AST node for the minimum() reduction."""
    def getTag(self)->int:
        return TERM_MINIMUM
    def __init__(self, operand):
        self.tag = self.getTag()
        self.x = cast(Term, _Wrapper_(operand))
    def __repr__(self):
        return "TermMinimum({})".format(self.x)
# Tags for lowered optimization components (OptTerm.op), numbered with the
# same module-level iota counter as the TERM_* tags.
OPT_TERM_EQ=_operatorsIota()
OPT_TERM_GT=_operatorsIota()
OPT_TERM_LT=_operatorsIota()
OPT_TERM_MINIMIZE=_operatorsIota()
OPT_TERM_MAXIMIZE=_operatorsIota()
OPT_TERM_GT_MIN=_operatorsIota()
OPT_TERM_LT_MIN=_operatorsIota()
OPT_TERM_GT_MAX=_operatorsIota()
OPT_TERM_LT_MAX=_operatorsIota()
# operator, label, quad/lin part
class OptTerm(Term):
    """Lowered optimization component: quadratic part, linear part, operator
    tag (one of the OPT_TERM_* constants), comparand, and label."""
    def getTag(self)->int:
        return TERM_OPT
    def __init__(self, quad, lin, op, cmp, label: str):
        self.quad = quad
        self.lin = lin
        self.op = op
        self.cmp = cmp
        self.label = label
    def __repr__(self):
        # Map known operator tags to their symbolic names; unknown tags fall
        # back to their numeric string, matching the original behaviour.
        op_names = {
            OPT_TERM_EQ: "OPT_TERM_EQ",
            OPT_TERM_GT: "OPT_TERM_GT",
            OPT_TERM_LT: "OPT_TERM_LT",
            OPT_TERM_MINIMIZE: "OPT_TERM_MINIMIZE",
            OPT_TERM_MAXIMIZE: "OPT_TERM_MAXIMIZE",
            OPT_TERM_GT_MIN: "OPT_TERM_GT_MIN",
            OPT_TERM_LT_MIN: "OPT_TERM_LT_MIN",
            OPT_TERM_GT_MAX: "OPT_TERM_GT_MAX",
            OPT_TERM_LT_MAX: "OPT_TERM_LT_MAX",
        }
        op_text = op_names.get(self.op, str(self.op))
        return ("OptTerm(" + repr(self.quad) + "," + repr(self.lin) + ","
                + op_text + "," + repr(self.cmp) + "," + repr(self.label) + ")")
# Opt terms & declare & weight declares
class Target(Term):
    """Fully lowered problem: OptTerm components plus annotation terms and
    the symbol layout (positions and sizes)."""
    def getTag(self)->int:
        return TERM_TARGET
    def __init__(self, components: List[OptTerm], annotations: List[Term], positions: Dict[str,int], sizes: Dict[str,int]):
        self.components = components
        self.annotations = annotations
        self.positions = positions
        self.sizes = sizes
    def __repr__(self):
        parts = [repr(self.components), repr(self.annotations), repr(self.positions), repr(self.sizes)]
        return "Target(" + ",".join(parts) + ")"
# Links: rebind the forward-declared constructor hooks (used by Term's
# operator overloads) to the concrete node classes now that they exist.
_TermAdd_ = TermAdd
_TermSub_ = TermSub
_TermMul_ = TermMul
_TermDiv_ = TermDiv
_TermAnd_ = TermAnd
_TermEqual_ = TermEqual
_TermGt_ = TermGt
_TermLt_ = TermLt
_TermQuadratic_ = TermQuadratic
_TermGetItem_ = TermGetItem
# helpers
def sym(x:str)->TermSymbol:
    """Create a symbolic-variable node with the given name."""
    return TermSymbol(x)
def wrap(x: Any)->Term:
    """Coerce a raw value into a Term node: numbers and ndarrays become
    TermAtom, strings become TermSymbol, anything else passes through."""
    if isinstance(x, (int, float, np.float32, np.float64, np.ndarray)):
        return TermAtom(x)
    if isinstance(x, str):
        return TermSymbol(x)
    return x
def quadratic(m: Any, x: Any)->Term:
    """Build a quadratic-form node with weight m over x."""
    return TermQuadratic(m, x)
def maximize(v,expr):
    """Build a maximization objective with weight v over expr."""
    return TermMaximize(v,expr)
def minimize(v,expr):
    """Build a minimization objective with weight v over expr."""
    return TermMinimize(v,expr)
def sum(expr):
    """DSL sum: multiply by a wrapped 1.  NOTE: intentionally shadows the builtin sum."""
    return wrap(1)*expr
def minimum(expr):
    """Build a minimum-reduction node over expr."""
    return TermMinimum(expr)
def maximum(expr):
    """Build a maximum-reduction node over expr."""
    return TermMaximum(expr)
def declare(a,b):
    """Build a declaration node binding a to b."""
    return TermDeclare(a,b)
def label(a,b):
    """Build a label node attaching label a to expression b."""
    return TermLabel(a,b)
def bitsize(a,b):
    """Build a bit-size annotation node for a with size b."""
    return TermBitsize(a,b)
# Hook up the coercion helper used by the operator overloads.
_Wrapper_ = wrap
class PositionsHints:
    """Placement hints for symbols: absolute positions (at), containment
    (inside), and minimum sizes (minSize), all keyed by symbol name."""
    def __init__(self):
        self.at : Dict[str, int] = {}
        self.inside : Dict[str, str] = {}
        self.minSize : Dict[str, int] = {}
    def __repr__(self):
        return ("PositionHints("
                + repr(self.at) + ","
                + repr(self.inside) + ","
                + repr(self.minSize)
                + ")")
    def getProblemSize(self):
        """Total problem size: max over placed symbols of position + size
        (e.g. at 0 with size 1 => 1 element)."""
        extents = [self.at[name] + self.minSize[name] for name in self.at]
        return max(extents, default=0)
def estimateShapeOfExpression(h: PositionsHints, t: Term) -> Tuple[int,int]:
    """Estimate the (rows, cols) shape of an expression tree.

    Scalars are (1,1); vectors are (n,1).  Symbol sizes come from the hints'
    minSize table; add/sub take elementwise maxima; mul follows the matrix/
    vector conventions commented inline.  Unknown tags default to (1,1).
    """
    tag = t.getTag()
    if tag == TERM_ATOM:
        ta = cast(TermAtom, t)
        a = ta.atom
        if isinstance(a, np.ndarray):
            s = a.shape
            if len(s) == 2:
                return s
            # 1-D arrays are treated as column vectors.
            return (s[0],1)
        return (1,1)
    if tag == TERM_SYMBOL:
        ts = cast(TermSymbol, t)
        s = ts.name
        if s in h.minSize:
            return (h.minSize[s],1)
        # Symbol with no size hint yet: assume scalar.
        return (1,1)
    if tag == TERM_ADD:
        tadd = cast(TermAdd, t)
        s1 = estimateShapeOfExpression(h, tadd.left)
        s2 = estimateShapeOfExpression(h, tadd.right)
        return (max(s1[0],s2[0]), max(s1[1],s2[1]))
    if tag == TERM_SUB:
        tsub = cast(TermSub, t)
        s1 = estimateShapeOfExpression(h, tsub.left)
        s2 = estimateShapeOfExpression(h, tsub.right)
        return (max(s1[0],s2[0]), max(s1[1],s2[1]))
    if tag == TERM_MUL:
        # convention
        # vector * vector => scalar
        # v * matrix or matrix *v => matrix prod
        tmul = cast(TermMul, t)
        s1 = estimateShapeOfExpression(h, tmul.left)
        s2 = estimateShapeOfExpression(h, tmul.right)
        # "Is a vector" == has a single column.
        s1v = s1[1] == 1
        s2v = s2[1] == 1
        #print(s1v,s2v)
        if s1v and s2v:
            # vector/vector => scalar => 1
            return (1,1)
        if not(s1v) and s2v:
            # matrix * vector: vector(lines(matrix))
            return (s1[0],1)
        if s1v and not(s2v):
            # vector transpose * matrix => vector(cols(matrix))
            return (s2[1],1)
        if not(s1v) and not(s2v):
            # matrix matrix
            return (s1[0], s2[1])
        return (1,1)
    if tag == TERM_QUADRATIC:
        # A quadratic form always evaluates to a scalar.
        return (1,1)
    return (1,1)
def propagateShape(h: PositionsHints, t: Term, sh: Tuple[int,int]):
    """Push the shape hint ``sh`` = (rows, cols) down the expression tree.

    Works purely by side effect:
      * constant atoms (scalars / small arrays) are resized in place so
        they match the requested shape,
      * per-symbol minimum sizes are recorded/grown in ``h.minSize``,
      * containment (``h.inside``) and fixed positions (``h.at``) are
        recorded for GETITEM / DECLARE nodes.
    """
    tag = t.getTag()
    if tag == TERM_ATOM:
        ta = cast(TermAtom, t)
        a = ta.atom
        if isinstance(a, np.ndarray):
            s = a.shape
            if len(s) == 2:
                if s == (1,1):
                    # BUGFIX: read the scalar value from the array itself;
                    # the original indexed the *shape tuple* (s[0,0]), which
                    # raises TypeError.
                    v = a[0,0]
                    # BUGFIX: np.diag() has no dtype parameter; build the
                    # typed vector first, then diagonalize.  Also store the
                    # result back on the atom (the original re-read ta.atom
                    # and threw the resized matrix away).
                    a = np.diag(np.array([v for k in range(sh[0])], dtype=np.float64))
                    ta.atom = a
            else:
                if sh[0] > s[0]:
                    # BUGFIX: replicate the first *element* of the vector
                    # (the original used s[0], i.e. the current length).
                    # Consistent with the padding of short atoms done in
                    # extractMultiDimLinearForm.
                    v = a[0]
                    a = np.array([v for k in range(sh[0])])
                    ta.atom = a
        if isinstance(a, int) or isinstance(a, float) or isinstance(a, np.float32) or isinstance(a, np.float64):
            if sh[1]==1:
                # Broadcast the scalar to a column vector.
                a = np.array([a for k in range(sh[0])], dtype=np.float64)
                ta.atom = a
            else:
                # Broadcast the scalar to a scaled identity (diagonal) matrix.
                # BUGFIX: np.diag() has no dtype parameter.
                a = np.diag(np.array([a for k in range(sh[0])], dtype=np.float64))
                ta.atom = a
        pass
    if tag == TERM_SYMBOL:
        ts = cast(TermSymbol, t)
        s = ts.name
        if s in h.minSize:
            # Sizes only ever grow: keep the max of the old and new hints.
            _s2 = h.minSize[s]
            _sf = max(sh[0], _s2)
            h.minSize[s] = _sf
        else:
            h.minSize[s] = sh[0]
    if tag == TERM_ADD:
        tadd = cast(TermAdd, t)
        s1 = estimateShapeOfExpression(h, tadd.left)
        s2 = estimateShapeOfExpression(h, tadd.right)
        # NOTE(review): sf is computed but sh is what gets propagated below;
        # it looks like sf may have been intended -- confirm before changing.
        sf = (max(sh[0],max(s1[0],s2[0])), max(sh[1],max(s1[1],s2[1])))
        propagateShape(h, tadd.left, sh)
        propagateShape(h, tadd.right, sh)
    if tag == TERM_SUB:
        tsub = cast(TermSub, t)
        s1 = estimateShapeOfExpression(h, tsub.left)
        s2 = estimateShapeOfExpression(h, tsub.right)
        # NOTE(review): same unused-sf pattern as TERM_ADD.
        sf = (max(sh[0],max(s1[0],s2[0])), max(sh[1],max(s1[1],s2[1])))
        propagateShape(h, tsub.left, sh)
        propagateShape(h, tsub.right, sh)
    if tag == TERM_MUL:
        # convention:
        #   vector * vector                   => scalar (dot product)
        #   vector * matrix / matrix * vector => matrix product
        tmul = cast(TermMul, t)
        s1 = estimateShapeOfExpression(h, tmul.left)
        s2 = estimateShapeOfExpression(h, tmul.right)
        s1v = s1[1] == 1
        s2v = s2[1] == 1
        if s1v and s2v:
            # vector . vector => scalar; both operand lengths must agree.
            sf = (max(sh[0],max(s1[0],s2[0])), 1)
            propagateShape(h, tmul.left, sf)
            propagateShape(h, tmul.right, sf)
        if not(s1v) and s2v:
            # matrix * vector => vector(lines(matrix))
            sz = max(s1[0], sh[0])
            propagateShape(h,tmul.left,(sz,s1[1]))
        if s1v and not(s2v):
            # vector^T * matrix => vector(cols(matrix))
            sz = max(s2[1], sh[0])
            propagateShape(h,tmul.right, (sz,s2[1]))
        if not(s1v) and not(s2v):
            # matrix * matrix
            sz1 = max(s1[0], sh[0])
            sz2 = max(s2[1], sh[1])
            propagateShape(h, tmul.left, (sz1, s1[1]))
            propagateShape(h, tmul.right, (s2[0], sz2))
        pass
    if tag == TERM_QUADRATIC:
        tq = cast(TermQuadratic, t)
        shM = estimateShapeOfExpression(h,tq.m)
        shX = estimateShapeOfExpression(h,tq.x)
        # x^T M x: M must be square (sz, sz) and x a column vector (sz, 1).
        sz = max(shM[0], shX[0])
        propagateShape(h, tq.m, (sz,sz))
        propagateShape(h, tq.x, (sz,1))
    # propagate to other (relational / structural) node types
    if tag == TERM_EQUAL:
        te = cast(TermEqual, t)
        s1 = estimateShapeOfExpression(h, te.left)
        s2 = estimateShapeOfExpression(h, te.right)
        # NOTE(review): same unused-sf pattern as TERM_ADD.
        sf = (max(sh[0],max(s1[0],s2[0])), max(sh[1],max(s1[1],s2[1])))
        propagateShape(h, te.left, sh)
        propagateShape(h, te.right, sh)
    if tag == TERM_LT:
        tlt = cast(TermLt, t)
        s1 = estimateShapeOfExpression(h, tlt.left)
        s2 = estimateShapeOfExpression(h, tlt.right)
        # NOTE(review): same unused-sf pattern as TERM_ADD.
        sf = (max(sh[0],max(s1[0],s2[0])), max(sh[1],max(s1[1],s2[1])))
        propagateShape(h, tlt.left, sh)
        propagateShape(h, tlt.right, sh)
    if tag == TERM_GT:
        tgt = cast(TermGt, t)
        s1 = estimateShapeOfExpression(h, tgt.left)
        s2 = estimateShapeOfExpression(h, tgt.right)
        # NOTE(review): same unused-sf pattern as TERM_ADD.
        sf = (max(sh[0],max(s1[0],s2[0])), max(sh[1],max(s1[1],s2[1])))
        propagateShape(h, tgt.left, sh)
        propagateShape(h, tgt.right, sh)
    if tag == TERM_AND:
        # Conjunction of two constraints: each side is its own (1,1) root.
        tand = cast(TermAnd, t)
        propagateShape(h, tand.left, (1,1))
        propagateShape(h, tand.right, (1,1))
    if tag == TERM_GETITEM:
        tget = cast(TermGetItem, t)
        propagateShape(h, tget.left, (1,1))
        propagateShape(h, tget.right, (1,1))
        if tget.left.getTag() == TERM_SYMBOL and tget.right.getTag() == TERM_SYMBOL:
            # Remember that symbol `right` lives inside symbol `left`.
            sym1 = cast(TermSymbol, tget.left)
            sym2 = cast(TermSymbol, tget.right)
            h.inside[sym2.name] = sym1.name
    if tag == TERM_LABEL:
        tlabel = cast(TermLabel, t)
        # The label itself carries no shape; forward sh to the labeled body
        # (not (1,1) -- the label must not clamp the body's shape).
        propagateShape(h, tlabel.right, sh)
    if tag == TERM_MAXIMIZE:
        tmaxi = cast(TermMaximize, t)
        # An objective is a scalar expression.
        propagateShape(h, tmaxi.x, (1,1))
    if tag == TERM_MINIMIZE:
        tmini = cast(TermMinimize, t)
        # An objective is a scalar expression.
        propagateShape(h, tmini.x, (1,1))
    if tag == TERM_DECLARE:
        tdecl = cast(TermDeclare, t)
        if tdecl.left.getTag() == TERM_SYMBOL and tdecl.right.getTag() == TERM_ATOM:
            # `symbol at <index>` declaration: pin the symbol's slot.
            sym1 = cast(TermSymbol, tdecl.left)
            at1 = cast(TermAtom, tdecl.right)
            h.at[sym1.name] = int(at1.atom)
    pass
def fillSlots(sizes: Dict[str,int], positions: Dict[str, int])->List[str]:
    """Lay out named variables of given sizes into a flat slot vector.

    Variables already listed in ``positions`` keep their fixed start slot;
    the remaining ones are packed (largest first) into the first gap wide
    enough to hold them.  ``positions`` is updated in place with the final
    start slot of every variable.

    Returns the slot map: index -> variable name, with "" for free slots
    and "#" for slots occupied by the tail of a multi-slot variable.
    """
    rv: List[str] = []
    def getAt(x: int)->str:
        # Read slot x, growing the vector with empty slots on demand.
        if len(rv) > x:
            return rv[x]
        else:
            for i in range(x - len(rv) + 1):
                rv.append("")  # => void
            return ""
    def setAt(x: int, v: str):
        # Write slot x, growing the vector with empty slots on demand.
        if len(rv) > x:
            rv[x] = v
        else:
            for i in range(x - len(rv) + 1):
                rv.append("")  # => void
            rv[x] = v
    # Pre-mark the variables whose position is already fixed, so the
    # packing pass below cannot overlap them.
    for p in positions:
        setAt(positions[p], p)
        if sizes[p] > 1:
            for i in range(sizes[p]-1):
                # BUGFIX: mark every occupied tail slot; the original wrote
                # positions[p]+1 for all i, leaving slots positions[p]+2..
                # apparently free and allowing overlapping placements.
                setAt(positions[p]+1+i, "#")
    szs: List[Tuple[str,int]] = []
    for k in sizes:
        szs.append((k, sizes[k]))
    szs.sort(key = lambda x:x[1])
    def findPosition(n: str, s:int):
        # Fixed position if known, else the first gap of width s.
        if n in positions:
            return positions[n]
        cursor = 0
        failedAt = 0
        def isCursorOk() -> bool:
            nonlocal cursor
            nonlocal failedAt
            failedAt = 0
            for i in range(s):
                if getAt(cursor + i) != "":
                    failedAt = i
                    return False
            return True
        while not isCursorOk():
            # Jump just past the slot that caused the collision.
            cursor = cursor + failedAt+1
        return cursor
    # Place variables from largest to smallest (reverse of the sorted list).
    for idx in range(len(szs)):
        n = szs[len(szs)-1-idx][0]
        size = szs[len(szs)-1-idx][1]
        pos = findPosition(n, size)
        positions[n] = pos
        setAt(pos, n)
        if sizes[n] > 1:
            for i in range(sizes[n]-1):
                setAt(pos+1+i, "#")
    return rv
def findSizeAndPositions(h: PositionsHints, t: Term):
    """Run shape propagation over ``t`` and assign a slot position to
    every symbol recorded in ``h``.

    Top-level symbols (those not nested inside another symbol) are packed
    first; then the symbols contained in each top-level variable are
    packed among themselves.  All results land in ``h.at``.
    """
    # Two passes, so sizes discovered late in the first pass can still
    # influence the outcome of the second one.
    propagateShape(h, t, (1, 1))
    propagateShape(h, t, (1, 1))

    topSizes: Dict[str, int] = {}
    topPositions: Dict[str, int] = {}
    for name in h.minSize:
        if name in h.inside:
            continue  # handled in the nested pass below
        topSizes[name] = h.minSize[name]
        if name in h.at:
            topPositions[name] = h.at[name]
    fillSlots(topSizes, topPositions)
    for name in topPositions:
        h.at[name] = topPositions[name]

    # Lay out the variables nested inside each top-level variable.
    for outer in topSizes:
        innerSizes: Dict[str, int] = {}
        innerPositions: Dict[str, int] = {}
        for name in h.inside:
            if h.inside[name] == outer:
                innerSizes[name] = h.minSize[name]
                if name in h.at:
                    innerPositions[name] = h.at[name]
        if len(innerSizes) > 0:
            fillSlots(innerSizes, innerPositions)
            for name in innerPositions:
                h.at[name] = innerPositions[name]
def extractSquareMatrix(h: PositionsHints, t: Term, windowBase: int, windowSize:int)->Union[None,np.ndarray]:
    """Return the constant matrix value of ``t`` if it is a plain numeric
    atom, otherwise None (meaning: not usable as a constant matrix)."""
    sz = h.getProblemSize()  # queried for parity with the sibling extractors; unused here
    if t.getTag() != TERM_ATOM:
        return None
    atom = cast(TermAtom, t).atom
    if isinstance(atom, (int, float)):
        # Promote a plain scalar to a 1x1 matrix.
        return np.array([[float(atom)]], dtype=np.float64)
    if isinstance(atom, np.ndarray) and (atom.shape == (1,) or len(atom.shape) == 2):
        return atom
    return None
def symmetrize(m: np.ndarray)->np.ndarray:
    """Return the symmetric part of ``m``, i.e. (m + m^T) / 2."""
    transposed = np.transpose(m)
    return (m + transposed) * 0.5
# Affine
def extractMultiDimLinearForm(h: PositionsHints, t: Term, windowBase: int, windowSize:int)->Tuple[np.ndarray, np.ndarray]:
    """Extract the affine form of ``t`` over the variable window.

    Returns (A, B) with A of shape (sz, sz) and B of shape (sz,), such
    that the expression evaluates to A.x + B over the full variable
    vector x of the problem.  Only the symbols intersecting
    [windowBase, windowBase + windowSize) contribute identity entries.

    Raises:
        Exception: for genuinely non-linear terms (matrix * matrix) or
            unsupported node types.
    """
    tag = t.getTag()
    sz = h.getProblemSize()
    if tag == TERM_SYMBOL:
        ts = cast(TermSymbol, t)
        n = ts.name
        pos = h.at[n]
        lsz = h.minSize[n]
        if pos >= windowBase + windowSize: ## TODO or .... pos < (and for other places like that)
            # Symbol lies entirely past the window: contributes nothing.
            # BUGFIX: the original returned np.array((sz,)) for B, i.e. a
            # 1-element array holding the *value* sz, not a zero vector.
            return (np.zeros((sz,sz), dtype=np.float64), np.zeros((sz,), dtype=np.float64))
        linear = np.zeros((sz,sz), dtype=np.float64)
        for i in range(lsz):
            # BUGFIX: the upper-bound test used the local index i instead of
            # the absolute slot i+pos (compare extractQuadraticForm).
            if i+pos>=windowBase and i+pos < windowBase+windowSize:
                linear[i+pos,i+pos] = 1.0
        return (linear, np.zeros((sz,), dtype=np.float64))
    if tag == TERM_MUL:
        tmul = cast(TermMul, t)
        mm1 = extractSquareMatrix(h, tmul.left, windowBase, windowSize)
        mm2 = extractSquareMatrix(h, tmul.right, windowBase, windowSize)
        def sizeUp(mm, Ax):
            # Embed the small constant matrix mm into a full (sz, sz)
            # matrix, aligned on the rows/columns that Ax actually uses.
            tgtSq = np.zeros((sz, sz),dtype=np.float64)
            hasLines = []
            for i in range(sz):
                hasLine = False
                for j in range(sz):
                    if abs(Ax[i,j]) > 1e-12:
                        hasLine = True
                # NO! if abs(Bx[i]) > 1e-12:
                #    hasLine = True
                hasLines.append(hasLine)
            cursori = 0
            for i in range(sz):
                cursorj = 0
                for j in range(sz):
                    if hasLines[i] and hasLines[j]:
                        tgtSq[i,j] = mm[cursori, cursorj]
                    if hasLines[j]:
                        cursorj = cursorj+1
                if hasLines[i]:
                    cursori = cursori+1
            return tgtSq
        (Ax, Bx) = (None, None)
        try:
            (Ax, Bx) = extractMultiDimLinearForm(h, tmul.left, windowBase, windowSize)
        except Exception:
            # BUGFIX: was a bare `except:` (also swallowed KeyboardInterrupt).
            pass
        (Ay, By) = (None, None)
        try:
            (Ay, By) = extractMultiDimLinearForm(h, tmul.right, windowBase, windowSize)
        except Exception:
            pass
        if mm1 is None:
            if mm2 is None:
                # Last resort / desperate, and of course false in general:
                # (Ax,Bx) (x) (Ay,By) ~ By^t (Ax,Bx) + Bx^t (Ay,By) + Bx..By
                # NOTE(review): assumes both recursive extractions above
                # succeeded (Ax..By not None) -- will raise otherwise.
                return (np.matmul(np.transpose(By),Ax) + np.matmul(np.transpose(Bx),Ay), np.matmul(np.transpose(Bx), By))
            else:
                # affine * constant-matrix ~ matrix^t applied to (Ax, Bx)
                mm2_ = sizeUp(mm2, Ax)
                mm2t = np.transpose(mm2_)
                return (np.matmul(mm2t,Ax), np.matmul(mm2t, Bx))
        else:
            if mm2 is None:
                # constant-matrix * affine: mm1 applied to (Ay, By)
                mm1_ = sizeUp(mm1, Ay)
                return (np.matmul(mm1_, Ay), np.matmul(mm1_,By))
            else:
                # panic!
                raise Exception("mat * mat => non linear")
    if tag == TERM_SUB:
        tsub = cast(TermSub, t)
        (Ax, Bx) = extractMultiDimLinearForm(h, tsub.left, windowBase, windowSize)
        (Ay, By) = extractMultiDimLinearForm(h, tsub.right, windowBase, windowSize)
        return (Ax-Ay, Bx-By)
    if tag == TERM_ADD:
        tadd = cast(TermAdd, t)
        (Ax, Bx) = extractMultiDimLinearForm(h, tadd.left, windowBase, windowSize)
        (Ay, By) = extractMultiDimLinearForm(h, tadd.right, windowBase, windowSize)
        return (Ax+Ay, Bx+By)
    if tag == TERM_ATOM:
        tatom = cast(TermAtom, t)
        atom = tatom.atom
        if isinstance(atom, int):
            return (np.zeros((sz,sz), dtype=np.float64), np.array([float(atom) for i in range(sz)], dtype=np.float64))
        if isinstance(atom, float):
            return (np.zeros((sz,sz), dtype=np.float64), np.array([float(atom) for i in range(sz)], dtype=np.float64))
        if isinstance(atom, np.ndarray):
            # here, better be sure that you are at 0... no time for it at this stage ~~~~
            if len(atom.shape) == 2:
                raise Exception("lin terms only here!")
            rv = np.zeros((sz,), dtype=np.float64)
            for i in range(atom.shape[0]):
                rv[i] = atom[i]
            if (sz > atom.shape[0]): # UGLY, needed for expressions like (x-1)**2 + (y-2)**2; a cleaner fix is to attach a /range/ to the args
                for i in range(sz - atom.shape[0]):
                    rv[i+atom.shape[0]] = atom[0]
            return (np.zeros((sz,sz), dtype=np.float64), rv)
    raise Exception("Term type not supported")
def extractQuadraticForm(h: PositionsHints, t: Term, windowBase: int, windowSize:int)->Tuple[np.ndarray,np.ndarray,float]:
tag = t.getTag()
sz = h.getProblemSize()
if tag == TERM_SYMBOL:
ts = cast(TermSymbol, t)
n = ts.name
pos = h.at[n]
lsz = h.minSize[n]
if pos >= windowBase + windowSize:
# no intersect
return (np.zeros((sz,sz), dtype=np.float64), np.array((sz,), dtype=np.float64), 0.0)
linear = np.zeros((sz,), dtype=np.float64)
quad = np.zeros((sz,sz), dtype=np.float64)
for i in range(lsz):
if pos+i>=windowBase and pos+i < windowBase+windowSize:
linear[i+pos] = 1.0
return (quad, linear, 0.0)
if tag == TERM_ADD:
ta = cast(TermAdd, t)
q1,l1,s1 = extractQuadraticForm(h, ta.left, windowBase, windowSize)
q2,l2,s2 = extractQuadraticForm(h, ta.right, windowBase, windowSize)
return (symmetrize(q1+q2), l1+l2, s1+s2)
if tag == TERM_SUB:
tsub = cast(TermSub, t)
q1,l1,s1 = extractQuadraticForm(h, tsub.left, windowBase, windowSize)
q2,l2,s2 = extractQuadraticForm(h, tsub.right, windowBase, windowSize)
return (symmetrize(q1-q2), l1-l2, s1-s2)
if tag == TERM_MUL:
tm = cast(TermMul, t)
try:
q1,l1,s1 = extractQuadraticForm(h, tm.left, windowBase, windowSize)
q2,l2,s2 = extractQuadraticForm(h, tm.right, windowBase, windowSize)
# notice this makes quad * quad = 0 [sorry for this version: this is a demo]
#print("quadratic mul:", (q1,l1,s1), (q2,l2,s2))
debug1 = np.matmul(np.transpose(np.reshape(l1, (l1.shape[0],1))),np.reshape(l2,(l2.shape[0],1)))
debug2 = np.transpose(np.reshape(l1, (l1.shape[0],1))),np.reshape(l2,(l2.shape[0],1))
#print("debug", debug1, debug2)
return (symmetrize(s1 * q2 + s2 * q2 + np.matmul(np.reshape(l1, (l1.shape[0],1)),np.transpose(np.reshape(l2,(l2.shape[0],1))))), l1*s2 + l2*s1, s1*s2)
except:
A1, B1 = extractMultiDimLinearForm(h, tm.left, windowBase, windowSize)
A2, B2 = extractMultiDimLinearForm(h, tm.right, windowBase, windowSize)
#print("MUL", (A1,B1), (A2,B2))
return (symmetrize(np.matmul(np.transpose(A1),A2)), np.matmul(np.transpose(B1), A2) + np.matmul(np.transpose(B2), A1), np.dot(B1,B2))
if tag == TERM_ATOM:
tatom = cast(TermAtom, t)
# refute non-scalar quantities
a = tatom.atom
if isinstance(a,int):
return (np.zeros((sz,sz), dtype=np.float64), np.zeros((sz,), dtype=np.float64), float(a))
if isinstance(a,float):
return (np.zeros((sz,sz), dtype=np.float64), | np.zeros((sz,), dtype=np.float64) | numpy.zeros |
"""Operations for dannce."""
import numpy as np
import cv2
import time
from typing import Text
import torch
import torch.nn.functional as F
class Camera:
def __init__(self, R, t, K, tdist, rdist, name=""):
self.R = | np.array(R) | numpy.array |
def test_sinc_differentation(show=False):
"""Test the differentation routine of ChebyshevRationalGrid"""
from psecas import SincGrid
import numpy as np
def psi(x, c):
from numpy.polynomial.hermite import hermval
return hermval(x, c) * np.exp(-x ** 2 / 2)
def dpsi(x, c):
"""Derivative of psi"""
from numpy.polynomial.hermite import hermval, hermder
yp = hermval(x, hermder(c)) * np.exp(-x ** 2 / 2) - x * psi(x, c)
return yp
def d2psi(x, c):
"""Second derivative of psi"""
from numpy.polynomial.hermite import hermval, hermder
yp = hermval(x, hermder(hermder(c))) * np.exp(-x ** 2 / 2)
yp += -x * hermval(x, hermder(c)) * | np.exp(-x ** 2 / 2) | numpy.exp |
##### test 버전 만들 때 주의 사항 ####
#### 1. test 시나리오 파일로 변경
#### 2. phase_txt 파일 만들기 위한 init 함수, step 함수 변경
import gym
import shutil
import uuid
from gym import error, spaces, utils
from gym.utils import seeding
import sys
import os
import numpy as np
from xml.etree.ElementTree import parse
import collections
import math
from config import TRAIN_CONFIG
print(TRAIN_CONFIG)
sys.path.append(TRAIN_CONFIG['libsalt_dir'])
import libsalt
# Hyper-parameters of the RL traffic-signal environment defined below.
state_weight = 1    # weight stored on the env as self.state_weight -- presumably scales the observation; confirm usage
reward_weight = 1   # weight stored on the env as self.reward_weight -- presumably scales the reward; confirm usage
addTime = 1         # stored as self.addTime -- presumably seconds added/removed per action; verify against step()
control_cycle = 3   # NOTE(review): not referenced in the visible code -- confirm it is still used
IS_DOCKERIZE = True # toggles the dockerized file-layout / scenario-json code paths below
if IS_DOCKERIZE:
    import json
    import platform
def getScenarioRelatedFilePath(args):
abs_scenario_file_path = '{}/{}'.format(os.getcwd(), args.scenario_file_path)
input_file_path = os.path.dirname(abs_scenario_file_path)
if platform.system() == 'Windows': # one of { Windows, Linux , Darwin }
dir_delimiter = "\\"
else:
dir_delimiter = "/"
with open(abs_scenario_file_path, 'r') as json_file:
json_data = json.load(json_file)
node_file = json_data["scenario"]["input"]["node"]
edge_file = json_data["scenario"]["input"]["link"]
tss_file = json_data["scenario"]["input"]["trafficLightSystem"]
node_file_path = input_file_path + dir_delimiter + node_file
edge_file_path = input_file_path + dir_delimiter + edge_file
tss_file_path = input_file_path + dir_delimiter + tss_file
return abs_scenario_file_path, node_file_path, edge_file_path, tss_file_path
def getScenarioRelatedBeginEndTime(scenario_file_path):
abs_scenario_file_path = '{}/{}'.format(os.getcwd(), scenario_file_path)
with open(abs_scenario_file_path, 'r') as json_file:
json_data = json.load(json_file)
begin_time = json_data["scenario"]["time"]["begin"]
end_time = json_data["scenario"]["time"]["end"]
return begin_time, end_time
def H(s):
    """Shannon entropy (in bits) of the values in the sequence ``s``."""
    total = len(s)
    entropy = 0.0
    for count in collections.Counter(s).values():
        p = count / total
        entropy -= p * math.log(p, 2)
    return entropy
class SALT_doan_multi_PSA(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, args):
self.state_weight = state_weight
self.reward_weight = reward_weight
self.addTime = addTime
self.reward_func = args.reward_func
self.actionT = args.action_t
self.logprint = args.logprint
if IS_DOCKERIZE:
scenario_begin, scenario_end = getScenarioRelatedBeginEndTime(args.scenario_file_path)
# self.startStep = args.trainStartTime if args.trainStartTime > scenario_begin else scenario_begin
# self.endStep = args.trainEndTime if args.trainEndTime < scenario_end else scenario_end
self.startStep = args.start_time if args.start_time > scenario_begin else scenario_begin
self.endStep = args.end_time if args.end_time < scenario_end else scenario_end
else:
self.startStep = args.trainStartTime
self.endStep = args.trainEndTime
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self.uid = str(uuid.uuid4())
if IS_DOCKERIZE:
abs_scenario_file_path = '{}/{}'.format(os.getcwd(), args.scenario_file_path)
self.src_dir = os.path.dirname(abs_scenario_file_path)
self.dest_dir = os.path.split(self.src_dir)[0]
self.dest_dir = '{}/data/{}/'.format(self.dest_dir, self.uid)
os.makedirs(self.dest_dir, exist_ok=True)
else:
self.src_dir = os.getcwd() + "/data/envs/salt/doan"
self.dest_dir = os.getcwd() + "/data/envs/salt/data/" + self.uid + "/"
os.mkdir(self.dest_dir)
src_files = os.listdir(self.src_dir)
for file_name in src_files:
full_file_name = os.path.join(self.src_dir, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, self.dest_dir)
if IS_DOCKERIZE:
scenario_file_name = args.scenario_file_path.split('/')[-1]
self.salt_scenario = "{}/{}".format(self.dest_dir, scenario_file_name)
if 0:
_, _, edge_file_path, tss_file_path = getScenarioRelatedFilePath(args)
else:
edge_file_path = "magic/doan_20210401.edg.xml"
tss_file_path = "magic/doan(without dan).tss.xml"
tree = parse(tss_file_path)
else:
# self.salt_scenario = self.dest_dir + 'doan_2021_actionT{}.scenario.json'.format(self.actionT)
self.salt_scenario = self.dest_dir + 'doan_2021.scenario.json'
tree = parse(os.getcwd() + '/data/envs/salt/doan/doan(without dan).tss.xml')
root = tree.getroot()
trafficSignal = root.findall("trafficSignal")
self.target_tl_obj = {}
self.phase_numbers = []
i=0
if IS_DOCKERIZE:
self.targetList_input = args.target_TL.split(',')
else:
self.targetList_input = args.targetTL.split(',')
self.targetList_input2 = []
for tl_i in self.targetList_input:
self.targetList_input2.append(tl_i) ## ex. SA 101
self.targetList_input2.append(tl_i.split(" ")[1]) ## ex.101
self.targetList_input2.append(tl_i.replace(" ", "")) ## ex. SA101
for x in trafficSignal:
if x.attrib['signalGroup'] in self.targetList_input2:
self.target_tl_obj[x.attrib['nodeID']] = {}
self.target_tl_obj[x.attrib['nodeID']]['crossName'] = x.attrib['crossName']
self.target_tl_obj[x.attrib['nodeID']]['signalGroup'] = x.attrib['signalGroup']
self.target_tl_obj[x.attrib['nodeID']]['offset'] = int(x.find('schedule').attrib['offset'])
self.target_tl_obj[x.attrib['nodeID']]['minDur'] = [int(y.attrib['minDur']) if 'minDur' in y.attrib else int(y.attrib['duration']) for
y in x.findall("schedule/phase")]
self.target_tl_obj[x.attrib['nodeID']]['maxDur'] = [int(y.attrib['maxDur']) if 'maxDur' in y.attrib else int(y.attrib['duration']) for
y in x.findall("schedule/phase")]
self.target_tl_obj[x.attrib['nodeID']]['cycle'] = np.sum([int(y.attrib['duration']) for y in x.findall("schedule/phase")])
self.target_tl_obj[x.attrib['nodeID']]['duration'] = [int(y.attrib['duration']) for y in x.findall("schedule/phase")]
tmp_duration_list = np.array([int(y.attrib['duration']) for y in x.findall("schedule/phase")])
# self.target_tl_obj[x.attrib['nodeID']]['green_idx'] = np.where(tmp_duration_list > 5)
self.target_tl_obj[x.attrib['nodeID']]['green_idx'] = np.where( | np.array(self.target_tl_obj[x.attrib['nodeID']]['minDur']) | numpy.array |
import os
from glob import glob
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
fig = plt.figure(figsize=(7.2,6))
grid = plt.GridSpec(18, 10, wspace=4, hspace=15)
ax = fig.add_subplot(grid[:9, :5])
ax.text(0.025, 0.966, 'a', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
var_dir = '/home/atom/ongoing/work_worldwide/variance'
region_list = os.listdir(var_dir)
region_nmad = []
region_nsamp = []
for region in region_list:
list_fn_csv = [os.path.join(var_dir,region,f) for f in os.listdir(os.path.join(var_dir,region))]
list_nmad = []
list_nsamp = []
for fn_csv in list_fn_csv:
df = pd.read_csv(fn_csv)
list_nmad.append(df.nmad.values)
list_nsamp.append(df.nsamp.values)
nmad_all = np.stack(list_nmad,axis=1)
nsamp_all = np.stack(list_nsamp,axis=1)
nan_mask = np.all(np.logical_or(np.isnan(nmad_all),nmad_all==0),axis=1)
nmad_final = np.nansum(nmad_all * nsamp_all,axis=1) / np.nansum(nsamp_all,axis=1)
nsamp_final = np.nansum(nsamp_all,axis=1)
nmad_final[nan_mask] = np.nan
nsamp_final[nan_mask] = 0
region_nmad.append(nmad_final)
region_nsamp.append(nsamp_final)
# ax.figure(figsize=(16,9))
slope = df.bin_slope.values
corr = df.bin_corr.values
bin_slope = sorted(list(set(list(slope))))
bin_corr = sorted(list(set(list(corr))))
nb_slope = len(bin_slope)
nb_corr = len(bin_corr)
color_list = ['tab:orange','tab:blue','tab:olive','tab:cyan','tab:red','tab:purple','tab:brown','tab:pink','tab:gray','tab:olive']
ls_list = ['solid','dashed','dotted']
# model_var = np.sqrt(3**2 + (20 * np.tan(np.array(5) * np.pi / 180))**2) + (((100-np.array(bin_corr))/100)*20)**1.25
#
# for i in range(len(region_nmad)):
# i = 0
# for j in range(nb_slope-2):
#
# nmad = region_nmad[i]
#
# ax.plot(corr[1:nb_corr],nmad[j*nb_corr+1:j*nb_corr+nb_corr],label='Slope category: '+str(bin_slope[j]-5)+'-'+str(bin_slope[j]+5)+' degrees',color=color_list[j],linestyle=ls_list[i])
#
#
# # ax.plot(bin_corr,model_var,label='model',linewidth=2)
#
# ax.xlabel('Correlation (percent)')
# ax.ylabel('Stable terrain NMAD (m)')
# ax.ylim([0,50])
# ax.legend()
#
x_slope = np.arange(5,45,0.1)
model_var = np.sqrt(3**2 + (40 * np.tan(np.array(x_slope) * np.pi / 180))**2.5 + (((100-np.array(50))/100)*20)**2)
i=0
# for i in range(len(region_nmad)-1):
u=0
for j in np.arange(1,nb_corr,2):
nmad = region_nmad[i]
# ax.plot(bin_slope,nmad[np.arange(j,len(slope),nb_corr)],label='region: '+region_list[i]+', corr: '+str(bin_corr[j]),color=color_list[j],linestyle=ls_list[i])
ax.plot(bin_slope[:-2],nmad[np.arange(j,len(slope)-2*nb_corr,nb_corr)]**2,label='Empirical variance: $q$='+str(int(bin_corr[j]-5))+'-'+str(int(bin_corr[j]+5))+' %',color=color_list[u],linestyle=ls_list[i],marker='o',lw=0.5)
u+=1
model_var = np.sqrt(3**2 + ((20+(((100-np.array(100))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(95))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,label='Modelled: center of above\ncategories',linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(80))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(75))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(60))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(55))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(40))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(35))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(20))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(15))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,linestyle='dashed',color='black',lw=0.5)
ax.set_xlabel('Slope $\\alpha$ (degrees)')
ax.set_ylabel('Variance of elevation differences (m$^{2}$)')
ax.set_ylim([-100,2450])
ax.legend(loc='upper center',bbox_to_anchor=(0, 0., 0.75, 1),title='Elevation measurement error\nwith slope $\\alpha$ and quality\n of stereo-correlation $q$',title_fontsize=6)
ax.grid(linewidth=0.25)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[9:, :5])
ax.text(0.025, 0.966, 'b', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
csv_dir = '/home/atom/ongoing/work_worldwide/tvar/06_rgi60'
list_in_csv = glob(os.path.join(csv_dir,'**/*dh_tvar.csv'),recursive=True)
# in_csv = '/home/atom/ongoing/work_worldwide/tvar/06_rgi60/N64W018_dh_tvar.csv'
for in_csv in list_in_csv:
df = pd.read_csv(in_csv)
df = df[df['bin_val']==-15]
lags=df['lags']
vmean=df['vmean']
vstd=df['vstd']
ax.plot(lags, vmean, lw=0.75, label='Empirical variogram',color='tab:blue')
sublag = np.linspace(0,20,200)
white = np.ones(np.shape(sublag)) * 65
local = 50 * (1 - np.exp(-1/2*4*sublag**2))
period = 40 * (1 - np.exp(-2 * np.sin(np.pi/1*sublag)**2))
linear = 1.2 * sublag **2
rq = 150*(1 - (1+ sublag**2/(2*100000*3))**(-100000))
# ft.plot_vgm(df['lags'],df['vmean'],df['vstd'])
ax.plot(sublag,white+local+period+linear, color='black', lw=0.75, linestyle='dashed',label = 'Modelled variogram: \nsum of individual models')
ax.plot(sublag,white, color='tab:cyan', label = 'Model: white noise',linestyle='dashed',lw=0.5)
ax.plot(sublag,local, color='tab:red', label = 'Model: local',linestyle='dashed',lw=0.5)
ax.plot(sublag,period, color='tab:gray', label = 'Model: periodic',linestyle='dashed',lw=0.5)
ax.plot(sublag,linear, color='tab:orange', label = 'Model: linear',linestyle='dashed',lw=0.5)
# ax.plot(sublag,rq,color='magenta',label='local-linear kernel')
# ax.fill_between(lags, vmean + vstd, vmean - vstd, facecolor='blue', alpha=0.5)
# ax.set_title('Variogram: ')
ax.set_xlabel('Temporal lag (years)')
ax.set_ylabel('Variance of elevation differences (m$^{2}$)')
ax.legend(loc='upper center',bbox_to_anchor=(0, 0., 0.75, 1),title='Temporal covariance\nof glacier elevation',title_fontsize=6)
ax.set_xlim([0,12])
ax.grid(linewidth=0.25)
ax.set_ylim([0,400])
ax.tick_params(width=0.35,length=2.5)
# ax.set_xlim([0,10])
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, ExpSineSquared as ESS, PairwiseKernel, RationalQuadratic as RQ, WhiteKernel as WK
import pyddem.fit_tools as ft
import xarray as xr
import matplotlib.pyplot as plt
# fn_stack = '/home/atom/ongoing/N64W017.nc'
# fn_stack = '/home/atom/ongoing/N77E016.nc'
# Upsala
fn_stack='/home/atom/ongoing/work_worldwide/figures/data_for_figs/S50W074.nc'
ds=xr.open_dataset(fn_stack)
ds.load()
ref_dem_date=np.datetime64('2013-01-01')
gla_mask = '/home/atom/data/inventory_products/RGI/rgi60_all.shp'
# gla_mask = None
nproc=10
clobber=True
tstep=0.1
opt_gpr=False
kernel=None
filt_ref='both'
filt_ls=False
conf_filt_ls=0.99
#specify the exact temporal extent needed to be able to merge neighbouring stacks properly
tlim=[np.datetime64('2000-01-01'),np.datetime64('2019-01-01')]
#pixel
# x=418930
# y=7107460
# x= 439900
# y=7099000
# x=530000
# y=8593000
# x=515300
# y=8601200
# x=510740
# y=8584920
# x=544674
# y=8580970
#Upsala
x=623000
y=4471000
#filtering temporal values
keep_vals = ds.uncert.values < 20
ds = ds.isel(time=keep_vals)
t_vals = ds.time.values
# dates_rm_dupli = sorted(list(set(list(t_vals))))
# ind_firstdate = []
# for i, date in enumerate(dates_rm_dupli):
# ind_firstdate.append(list(t_vals).index(date))
# ds_filt = ds.isel(time=np.array(ind_firstdate))
# for i in range(len(dates_rm_dupli)):
# t_ind = (t_vals == dates_rm_dupli[i])
# if np.count_nonzero(t_ind) > 1:
# print('Here: '+str(i))
# print(ds.time.values[t_ind])
# # careful, np.nansum gives back zero for an axis full of NaNs
# mask_nan = np.all(np.isnan(ds.z.values[t_ind, :]), axis=0)
# ds_filt.z.values[i, :] = np.nansum(ds.z.values[t_ind, :] * 1. / ds.uncert.values[t_ind, None, None] ** 2,
# axis=0) / np.nansum(1. / ds.uncert.values[t_ind, None, None] ** 2, axis=0)
# # ds_filt.z.values[i, : ] = np.nanmean(ds.z.values[t_ind,:],axis=0)
# ds_filt.z.values[i, mask_nan] = np.nan
# # ds_filt.uncert.values[i] = np.nanmean(ds.uncert.values[t_ind])
# ds_filt.uncert.values[i] = np.nansum(ds.uncert.values[t_ind] * 1. / ds.uncert.values[t_ind] ** 2) / np.nansum(
# 1. / ds.uncert.values[t_ind] ** 2)
# ds = ds_filt
#starting
t_vals = ds['time'].values
ds_pixel = ds.sel(x=x,y=y,method='pad')
elev = ds_pixel.z.values
filt = np.logical_and.reduce((np.isfinite(elev),elev>-420,elev<8900))
t_vals=t_vals[filt]
elev=elev[filt]
med_slope = 10.
corr = ds_pixel.corr.values[filt]
uncert = np.sqrt(ds.uncert.values[filt]**2 + (20 * np.tan(med_slope * np.pi / 180)) ** 2 + (((100-corr)/100)*20)**2.5)
# fig, ax = plt.subplots(figsize=(16,9))
# ax.errorbar(t_vals, elev, uncert,fmt='o',color='black')
# ax.set_title('Raw data')
# ax.set_xlabel('Year after 2000')
# ax.set_ylabel('Elevation (m)')
# ax.legend(loc='lower left')
# ax.grid()
# plt.savefig('elev_raw.png',dpi=360)
# plt.close()
ref_arr = ds.ref_z.values
ref_vals = ds_pixel.ref_z.values
#spatial filtering
cutoff_kern_size=500
cutoff_thr=400.
res = 30
rad = int(np.floor(cutoff_kern_size / res))
max_arr, min_arr = ft.maxmin_disk_filter((ref_arr, rad))
ind_x = np.where(ds.x.values==ds_pixel.x.values)[0]
ind_y = np.where(ds.y.values==ds_pixel.y.values)[0]
max_elev = max_arr[ind_y,ind_x][0]
min_elev = min_arr[ind_y,ind_x][0]
ind = np.logical_or(elev > (max_elev + cutoff_thr), elev < (min_elev - cutoff_thr))
elev2 = np.copy(elev)
elev2[ind] = np.nan
#temporal filtering
base_thresh=150.
thresh=[-50,50]
delta_t = (ref_dem_date - t_vals).astype('timedelta64[D]').astype(float) / 365.24
dh = ref_vals - elev2
dt_arr = np.ones(dh.shape)
for i, d in enumerate(delta_t):
dt_arr[i] = dt_arr[i] * d
d_data = dh / dt_arr
ind2 = np.logical_or(np.logical_and(dt_arr < 0, np.logical_or(dh < - base_thresh + dt_arr*thresh[1], dh > base_thresh + dt_arr*thresh[0])),
np.logical_and(dt_arr > 0, np.logical_or(dh > base_thresh + dt_arr*thresh[1], dh < - base_thresh + dt_arr*thresh[0])))
vect1 = np.arange(np.datetime64('2000-01-01'),ref_dem_date,np.timedelta64(10))
dt1 = (ref_dem_date - vect1).astype('timedelta64[D]').astype(float)/365.24
vect2 = np.arange(ref_dem_date,np.datetime64('2020-01-01'),np.timedelta64(10))
dt2 = (ref_dem_date - vect2).astype('timedelta64[D]').astype(float)/365.24
vect = np.arange( | np.datetime64('2000-01-01') | numpy.datetime64 |
import numpy as np
from superbol.fit_blackbody import (bb_flux_integrated, bb_total_flux,
dbb_flux_integrated_dT, dbb_total_flux_dT)
def integrate_fqbol(wavelengths, fluxes, flux_uncertainties):
    """Calculate the trapezoidal rule integral of the observed `fluxes`.

    The trapezoidal rule integrates the data by assuming the function is
    linear between observed points, and then integrates under those line
    segments.  The uncertainty in the integral due to uncertainties in the
    observed fluxes is propagated by hand using standard error propagation:
    each flux uncertainty is weighted by half the width of the wavelength
    interval(s) it spans.

    Assumes at least two observations (``len(wavelengths) >= 2``).

    Args:
        wavelengths (list): Wavelengths at which the flux was observed.
        fluxes (list): Observed fluxes.
        flux_uncertainties (list): Uncertainty of each observed flux.

    Returns:
        tuple: 2-tuple of floats.

        * The value of the integral
        * The uncertainty in the integral due to the flux uncertainties.

        (fqbol, fqbol_uncertainty)
    """
    # np.trapz was removed in NumPy 2.0 in favor of np.trapezoid.
    try:
        trapezoid = np.trapezoid
    except AttributeError:
        trapezoid = np.trapz
    fqbol = trapezoid(fluxes, wavelengths)

    # Build the error-propagation weights in a plain list (the original
    # grew an array with repeated np.append, which is O(n^2)).
    quad_terms = []
    last = len(flux_uncertainties) - 1
    for i, uncertainty in enumerate(flux_uncertainties):
        if i == 0:
            width = wavelengths[i + 1] - wavelengths[i]
        elif i == last:
            width = wavelengths[i] - wavelengths[i - 1]
        else:
            width = wavelengths[i + 1] - wavelengths[i - 1]
        quad_terms.append(0.5 * width * uncertainty)

    # BUGFIX: the original called np.sum on a *generator* expression, which
    # is deprecated/unsupported by NumPy; square the array and sum instead.
    quad_terms = np.asarray(quad_terms, dtype=np.float64)
    fqbol_uncertainty = np.sqrt(np.sum(quad_terms ** 2))

    return fqbol, fqbol_uncertainty
def ir_correction(temperature, T_err, angular_radius, rad_err, longest_wl):
    """Correction for unobserved flux in the IR.

    Using the best-fit blackbody parameters, integrates the fitted
    blackbody from `longest_wl` out to infinity by subtracting the flux
    already integrated up to `longest_wl` from the total bolometric flux,
    and propagates the fit-parameter uncertainties.

    Args:
        temperature (float): Best fit blackbody temperature in Kelvin.
        T_err (float): Uncertainty in the fitted temperature in Kelvin.
        angular_radius (float): Best fit blackbody angular radius.
        rad_err (float): Uncertainty in the fitted angular radius.
        longest_wl (float): Longest observed wavelength.

    Returns:
        tuple: 2-tuple

        * (float): The IR correction in erg s^-1 cm^-2.
        * (float): The uncertainty of the correction in the same units.
    """
    total_flux = bb_total_flux(temperature, angular_radius)
    observed_part = bb_flux_integrated(longest_wl, temperature, angular_radius)
    ir_correction = total_flux - observed_part

    # Propagate the uncertainties of both fit parameters.
    dcorr_dT = (dbb_total_flux_dT(temperature, angular_radius)
                - dbb_flux_integrated_dT(longest_wl, temperature,
                                         angular_radius))
    T_errterm = dcorr_dT * T_err
    rad_errterm = 2 * ir_correction / angular_radius * rad_err
    ir_corr_err = np.sqrt(T_errterm**2 + rad_errterm**2)

    return ir_correction, ir_corr_err
def uv_correction_blackbody(temperature, T_err, angular_radius, rad_err,
                            shortest_wl):
    """Apply correction for unobserved flux in the UV using the blackbody fit.

    After the temperature and angular radius have been found through fitting a
    blackbody to the observed fluxes, this function takes those values and
    integrates under the fitted blackbody from the shortest observed wavelength
    down to :math:`\\lambda = 0`.

    Args:
        temperature (float): Best fit blackbody temperature in Kelvin
        T_err (float): Uncertainty in best fit blackbody temperature in Kelvin
        angular_radius (float): Best fit blackbody angular radius
        rad_err (float): Uncertainty in best fit blackbody angular radius
        shortest_wl (float): Shortest observed wavelength

    Returns:
        tuple: 2-tuple

        * (float): The UV correction in :math:`erg \\; s^{-1} cm^{-2}`
        * (float): The uncertainty in the UV correction in the same units
    """
    uv_correction = bb_flux_integrated(shortest_wl, temperature,
                                       angular_radius)
    # Propagate the temperature uncertainty through the integrated flux.
    T_errterm = dbb_flux_integrated_dT(shortest_wl, temperature,
                                       angular_radius) * T_err
    # Flux scales as angular_radius**2, hence the factor of two.
    rad_errterm = 2 * uv_correction / angular_radius * rad_err
    # NOTE: the final lines of the original were corrupted; reconstructed as
    # the quadrature sum plus a 2-tuple return, mirroring ir_correction() and
    # the docstring's declared return value.
    uv_corr_err = np.sqrt(T_errterm**2 + rad_errterm**2)
    return uv_correction, uv_corr_err
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from IPython import embed
from multiprocessing import Pool, cpu_count
#import mega_nn
import numpy as np
import scipy as sc
import scipy.stats as stats
import pandas as pd
from itertools import product, chain
import pickle
import os
import sys
import time
networks_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../networks'))
NNDB_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../NNDB'))
training_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../training'))
qlk4D_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../../QLK4DNN'))
sys.path.append(networks_path)
sys.path.append(NNDB_path)
sys.path.append(training_path)
sys.path.append(qlk4D_path)
from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db
from run_model import QuaLiKizNDNN, QuaLiKizDuoNN
from train_NDNN import shuffle_panda
from functools import partial
if __name__ == '__main__':
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
from matplotlib import gridspec, cycler
pretty = False
from load_data import nameconvert
from load_data import load_data, load_nn, prettify_df
from collections import OrderedDict
from peewee import AsIs, fn, SQL
import re
import gc
def mode_to_settings(mode):
    """Translate a run-mode name into a dict of plotting/processing switches.

    Args:
        mode (str): One of 'debug', 'quick' or 'pretty'.

    Returns:
        dict: Mapping of setting name -> bool (13 switches). An unrecognized
        mode returns an empty dict, matching the historical behaviour.
    """
    # Switches that were identical in every mode of the original
    # branch-per-mode implementation.
    common = {
        'plot_zerocolors': False,
        'plot_thresh1line': False,
        'calc_thresh1': False,
        'hide_qualikiz': False,
    }
    # Per-mode switches kept as data so the three modes cannot drift apart:
    # 'debug' plots everything serially, 'quick' disables plotting and runs
    # in parallel, 'pretty' produces publication-style figures.
    per_mode = {
        'debug': {
            'plot': True, 'plot_pop': True, 'plot_nns': True,
            'plot_slice': True, 'plot_poplines': True,
            'plot_threshlines': True, 'debug': True, 'parallel': False,
            'plot_threshslope': False,
        },
        'quick': {
            'plot': False, 'plot_pop': False, 'plot_nns': False,
            'plot_slice': False, 'plot_poplines': False,
            'plot_threshlines': False, 'debug': False, 'parallel': True,
            'plot_threshslope': False,
        },
        'pretty': {
            'plot': True, 'plot_pop': False, 'plot_nns': True,
            'plot_slice': False, 'plot_poplines': False,
            'plot_threshlines': False, 'debug': True, 'parallel': False,
            'plot_threshslope': True,
        },
    }
    if mode not in per_mode:
        return {}
    settings = dict(common)
    settings.update(per_mode[mode])
    return settings
def get_similar_not_in_table(table, max=20, only_dim=None, only_sep=False, no_particle=False, no_divsum=False,
                             no_mixed=True):
    """Find up to `max` networks of one class that have no row in `table`.

    Tries Network, ComboNetwork and MultiNetwork in turn; the first class that
    yields a match is used, and the result is narrowed to networks sharing the
    first match's target_names and feature_names.

    Args:
        table: peewee table (e.g. PostprocessSlice) used as the exclusion set.
        max (int): maximum number of networks returned.
        only_dim: if given, restrict to networks with this many features.
        only_sep (bool): require a separate-mode target (TEM/ITG/ETG).
        no_particle (bool): exclude particle-flux ('pf') targets.
        no_divsum (bool): exclude 'div'/'plus' combined targets.
        no_mixed (bool): exclude networks mixing 'pf' and 'ef' targets.

    Returns:
        peewee query of at most `max` similar, not-yet-sliced networks.
    """
    for cls, field_name in [(Network, 'network'),
                            (ComboNetwork, 'combo_network'),
                            (MultiNetwork, 'multi_network')
                            ]:
        # Networks of this class with no entry in `table` referencing them.
        non_sliced = (cls
                      .select()
                      .where(~fn.EXISTS(table.select().where(getattr(table, field_name) == cls.id)))
                      )
        if only_dim is not None:
            # Filter on the number of input features (Postgres array length).
            non_sliced &= cls.select().where(SQL("array_length(feature_names, 1)=" + str(only_dim)))
        if no_mixed:
            # Drop networks whose targets mix particle ('pf') and energy
            # ('ef') fluxes.
            non_sliced &= cls.select().where(~(SQL("(array_to_string(target_names, ',') like %s)", ['%pf%']) &
                                               (SQL("(array_to_string(target_names, ',') like %s)", ['%ef%'])))
                                             )
        tags = []
        if no_divsum is True:
            tags.extend(["div", "plus"])
        if no_particle is True:
            tags.append('pf')
        if len(tags) != 0:
            non_sliced &= no_elements_in_list(cls, 'target_names', tags)
        if only_sep is True:
            non_sliced &= any_element_in_list(cls, 'target_names', ['TEM', 'ITG', 'ETG'])
        if non_sliced.count() > 0:
            # Use the first matching class; `cls` and `network` are reused
            # below to narrow the query.
            network = non_sliced.get()
            break
    # NOTE(review): if no class produced a match, `network` is unbound here
    # and this raises NameError — confirm whether that can occur in practice.
    non_sliced &= (cls.select()
                   .where(cls.target_names == AsIs(network.target_names))
                   .where(cls.feature_names == AsIs(network.feature_names))
                   )
    non_sliced = non_sliced.limit(max)
    return non_sliced
def nns_from_NNDB(max=20, only_dim=None):
    """Load a family of similar, not-yet-sliced networks from the NNDB.

    Connects to the database, picks up to `max` networks with identical
    target/feature names that have no PostprocessSlice yet, and wraps each as
    a QuaLiKizNN.

    Returns:
        tuple: (slicedim, style, nns) where slicedim is 'Ati' or 'Ate',
        style is 'mono' or 'duo', and nns is an OrderedDict of label -> NN.
    """
    db.connect()
    non_sliced = get_similar_not_in_table(PostprocessSlice, max=max, only_sep=True, no_particle=False, no_divsum=True, only_dim=only_dim)
    network = non_sliced.get()
    style = 'mono'
    if len(network.target_names) == 2:
        # Two targets: check whether they form an electron/ion pair of the
        # same flux and regime, e.g. efeITG_GB + efiITG_GB -> 'duo' style.
        match_0 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[0])
        match_1 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[1])
        if len(match_0) == 1 and len(match_1) == 1:
            group_0 = match_0[0]
            group_1 = match_1[0]
            if ((group_0[1] == 'e' and group_1[1] == 'i') or
                    (group_0[1] == 'i' and group_1[1] == 'e')):
                style='duo'
            else:
                raise Exception('non-matching target_names. Not sure what to do.. {s}'
                                .format(network.target_names))
    # Collect the stability regime (ITG/ETG/TEM) of every target; they must
    # all agree to define a unique slice dimension.
    matches = []
    for target_name in network.target_names:
        matches.extend(re.compile('^.f.(ITG|ETG|TEM)_GB').findall(target_name))
    # All-equal check: a list equals both its head- and tail-slices only if
    # every element is identical.
    if matches[1:] == matches[:-1]:
        # ITG is driven by the ion gradient, TEM/ETG by the electron one.
        if matches[0] == 'ITG':
            slicedim = 'Ati'
        elif matches[0] == 'TEM' or matches[0] == 'ETG':
            slicedim = 'Ate'
    else:
        raise Exception('Unequal stability regime. Cannot determine slicedim')
    nn_list = {network.id: str(network.id) for network in non_sliced}
    print('Found {:d} {!s} with target {!s}'.format(non_sliced.count(), network.__class__, network.target_names))
    nns = OrderedDict()
    for dbnn in non_sliced:
        nn = dbnn.to_QuaLiKizNN()
        nn.label = '_'.join([str(el) for el in [dbnn.__class__.__name__ , dbnn.id]])
        nns[nn.label] = nn
    db.close()
    return slicedim, style, nns
def populate_nn_list(nn_set):
    """Return a hand-curated set of network ids and plot labels.

    Args:
        nn_set (str): Name of the experiment family: 'c_L2', 'topo',
            'filter', 'goodness', 'early_stop', 'similar', 'best' or 'duo'.

    Returns:
        tuple: (slicedim, style, nn_list) where slicedim is the scanned input
        ('Ate' or 'Ati'), style is 'mono' or 'duo', and nn_list is an
        OrderedDict of network id -> plot label.

    Raises:
        ValueError: for an unknown `nn_set` (previously this surfaced as a
            confusing UnboundLocalError on the return statement).
    """
    if nn_set == 'c_L2':
        nn_list = OrderedDict([(61, '$c_{L2} = 0.0$'),
                               # (48, '$c_{L2} = 0.05$'),
                               (37, '$c_{L2} = 0.1$'),
                               # (50, '$c_{L2} = 0.2$'),
                               # (51, '$c_{L2} = 0.35$'),
                               (49, '$c_{L2} = 0.5$'),
                               # (52, '$c_{L2} = 1.0$'),
                               (53, '$c_{L2} = 2.0$')])
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'topo':
        nn_list = OrderedDict([(65, 'neurons = $(10, 10)$'),
                               (64, 'neurons = $(30, 30)$'),
                               (73, 'neurons = $(30, 30, 30)$'),
                               (83, 'neurons = $(45, 45)$'),
                               (34, 'neurons = $(60, 60)$'),
                               (38, 'neurons = $(80, 80)$'),
                               (66, 'neurons = $(120, 120)$')])
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'filter':
        #nn_list = OrderedDict([(37, 'filter = 3'),
        #                       (58, 'filter = 4'),
        #                       (60, 'filter = 5')])
        nn_list = OrderedDict([(37, '$max(\chi_{ETG,e}) = 60$'),
                               (60, '$max(\chi_{ETG,e}) = 100$')])
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'goodness':
        nn_list = OrderedDict([(62, 'goodness = mabse'),
                               (37, 'goodness = mse')])
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'early_stop':
        nn_list = OrderedDict([(37, 'stop measure = loss'),
                               #(11, '$early_stop = mse'),
                               (18, 'stop measure = MSE')])
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'similar':
        nn_list = OrderedDict([
            (37, '37'),
            (67, '67'),
            (68, '68'),
            (69, '69'),
            (70, '70'),
            (71, '71'),
            (72, '72'),
            (73, '73'),
            (74, '74'),
        ])
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'best':
        # Second assignment deliberately overrides the first; toggle the
        # comments to switch between the efeETG and efiITG "best" network.
        nn_list = OrderedDict([(46, '')]) #efeETG
        nn_list = OrderedDict([(88, '')]) #efiITG
        slicedim = 'Ate'
        style = 'mono'
    elif nn_set == 'duo':
        nn_list = OrderedDict([
            (205, 'es_20'),
            (204, 'es_5'),
            (203, 'es_wrong')
        ])
        slicedim = 'Ati'
        style = 'duo'
    else:
        raise ValueError('Unknown nn_set {!r}'.format(nn_set))
    return slicedim, style, nn_list
def nns_from_nn_list(nn_list, slicedim, labels=True):
    """Load every network in ``nn_list`` and attach its display label.

    Args:
        nn_list: mapping of network id -> plot label.
        slicedim: kept for interface compatibility (not used here).
        labels (bool): when False, blank out every label.

    Returns:
        OrderedDict of network id -> loaded network.
    """
    nns = OrderedDict()
    for index, label in nn_list.items():
        network = load_nn(index)
        network.label = label if labels else ''
        nns[index] = network
    return nns
def nns_from_manual():
    """Load a hand-picked network straight from the database.

    The commented alternatives show previously used combinations (div/sum
    pairs, separate efi/efe networks, JSON files); currently only
    ComboNetwork id 3333 is loaded.

    Returns:
        tuple: (slicedim, style, nns) — slice dimension name, plot style and
        an OrderedDict of label -> QuaLiKizNN.
    """
    nns = OrderedDict()
    #div_nn = load_nn(405)
    #sum_nn = load_nn(406)
    #nn = QuaLiKizDuoNN(['efiITG_GB', 'efeITG_GB'], div_nn, sum_nn, [lambda x, y: x * y/(x + 1), lambda x, y: y/(x + 1)])
    #nn.label = 'div_style'
    #nns[nn.label] = nn
    #nn_efi = load_nn(88)
    #nn_efe = load_nn(89)
    #nn = QuaLiKizDuoNN(['efiITG_GB', 'efeITG_GB'], nn_efi, nn_efe, [lambda x, y: x, lambda x, y: y])
    #nn.label = 'sep_style'
    #nns[nn.label] = nn
    #nn = load_nn(205)
    #nn.label = 'combo_style'
    #nns[nn.label] = nn
    #subnn = (ComboNetwork.select()
    #         .where(ComboNetwork.id == 78)
    #         ).get()
    #nn = subnn.to_QuaLiKizComboNN()
    #nn.label = 'bla'
    #nns[nn.label] = nn
    #dbnn = Network.by_id(135).get()
    dbnns = []
    #dbnns.append(MultiNetwork.by_id(119).get())
    dbnns.append(ComboNetwork.by_id(3333).get())
    #dbnns.append(ComboNetwork.by_id(1050).get())
    #dbnns.append(MultiNetwork.by_id(102).get())
    # Wrap each DB row as a runnable NN, labelled '<ClassName>_<id>'.
    for dbnn in dbnns:
        nn = dbnn.to_QuaLiKizNN()
        nn.label = '_'.join([str(el) for el in [dbnn.__class__.__name__ , dbnn.id]])
        nns[nn.label] = nn
    #nns[nn.label] = QuaLiKizNDNN.from_json('nn.json')
    slicedim = 'Ati'
    # Second assignment wins; flip these two lines to switch plot style.
    style='duo'
    style='mono'
    #from qlkANNk import QuaLiKiz4DNN
    #nns['4D'] = QuaLiKiz4DNN()
    #nns['4D'].label = '4D'
    #nns['4D']._target_names = ['efeITG_GB', 'efiITG_GB']
    db.close()
    return slicedim, style, nns
def prep_df(store, nns, unstack, filter_less=np.inf, filter_geq=-np.inf, shuffle=True, calc_maxgam=False, clip=False, slice=None, frac=1):
    """Load the dataset from an HDF store and reshape it for slicing.

    Args:
        store: pandas HDFStore containing 'megarun1/input' and
            'megarun1/flattened'.
        nns: dict of networks; the first one fixes which feature and target
            columns are extracted.
        unstack: index level(s) to unstack into columns (the slice dimension).
        filter_less / filter_geq: target bounds used for row filtering (or
            clipping when `clip` is True).
        shuffle (bool): shuffle the resulting rows.
        calc_maxgam (bool): also join the maximum growth rate as 'maxgam'.
        clip (bool): clip targets to the bounds instead of dropping rows.
        slice: optional iterable of (column, value) pairs to select on.
        frac (float): keep only the first `frac` fraction of rows.

    Returns:
        tuple: (df, target_names) — unstacked dataframe and target columns.
    """
    nn0 = list(nns.values())[0]
    target_names = nn0._target_names
    feature_names = nn0._feature_names
    input = store['megarun1/input']
    # Collisionality is used in log space; replace Nustar by logNustar.
    try:
        input['logNustar'] = np.log10(input['Nustar'])
        del input['Nustar']
    except KeyError:
        print('No Nustar in dataset')
    if ('Zeffx' == feature_names).any() and not ('Zeffx' in input.columns):
        print('WARNING! creating Zeffx. You should use a 9D dataset')
        input['Zeffx'] = np.full_like(input['Ati'], 1.)
        # NOTE(review): this raise makes the Zeffx fallback unreachable —
        # presumably a deliberate hard stop; confirm.
        raise Exception
    if ('logNustar' == feature_names).any() and not ('logNustar' in input.columns):
        print('WARNING! creating logNustar. You should use a 9D dataset')
        input['logNustar'] = np.full_like(input['Ati'], np.log10(0.009995))
    if len(feature_names) == 4:
        # Reduce a 7D scan to 4D by fixing Ate, An and x at reference values.
        print('WARNING! Slicing 7D to 4D dataset. You should use a 4D dataset')
        idx = input.index[(
            np.isclose(input['Ate'], 5.75, atol=1e-5, rtol=1e-3) &
            np.isclose(input['An'], 2, atol=1e-5, rtol=1e-3) &
            np.isclose(input['x'], .45, atol=1e-5, rtol=1e-3)
        )]
    else:
        idx = input.index
    input = input[feature_names]
    data = store.select('megarun1/flattened', columns=target_names)
    input = input.loc[idx]
    data = data.loc[input.index]
    df = input.join(data[target_names])
    if calc_maxgam is True:
        # Maximum of the small- and large-scale growth rates per row.
        df_gam = store.select('/megarun1/flattened', columns=['gam_leq_GB', 'gam_great_GB'])
        df_gam = (df_gam.max(axis=1)
                  .to_frame('maxgam')
                  )
        df = df.join(df_gam)
    #itor = zip(['An', 'Ate', 'Ti_Te', 'qx', 'smag', 'x'], ['0.00', '10.00', '1.00', '5.00', '0.40', '0.45'])
    #itor = zip(['Zeffx', 'Ate', 'An', 'qx', 'smag', 'x', 'Ti_Te', 'logNustar'], [1.0, 5.75, 2.5, 2.0, 0.10000000149011612, 0.33000001311302185, 1.0, -2.000217201545864])
    if slice is not None:
        for name, val in slice:
            df = df[np.isclose(df[name], float(val), atol=1e-5, rtol=1e-3)]
    if clip is True:
        # NOTE(review): clip() is called as clip(filter_less, filter_geq),
        # i.e. (lower=filter_less, upper=filter_geq) — with the defaults
        # (inf, -inf) this looks swapped; confirm intended argument order.
        df[target_names] = df[target_names].clip(filter_less, filter_geq, axis=1)
    else:
        # filter
        df = df[(df[target_names] < filter_less).all(axis=1)]
        df = df[(df[target_names] >= filter_geq).all(axis=1)]
    #print(np.sum(df['target'] < 0)/len(df), ' frac < 0')
    #print(np.sum(df['target'] == 0)/len(df), ' frac == 0')
    #print(np.sum(df['target'] > 0)/len(df), ' frac > 0')
    #uni = {col: input[col].unique() for col in input}
    #uni_len = {key: len(value) for key, value in uni.items()}
    #input['index'] = input.index
    # Index by all input features, then pivot the slice dimension into
    # columns so each row is one slice.
    df.set_index([col for col in input], inplace=True)
    df = df.astype('float64')
    df = df.sort_index(level=unstack)
    df = df.unstack(unstack)
    if shuffle:
        df = shuffle_panda(df)
    #df.sort_values('smag', inplace=True)
    #input, data = prettify_df(input, data)
    #input = input.astype('float64')
    # Filter
    if frac < 1:
        idx = int(frac * len(df))
        df = df.iloc[:idx, :]
    #df = df.iloc[1040:2040,:]
    print('dataset loaded!')
    return df, target_names
def is_unsafe(df, nns, slicedim):
    """Check that the dataframe's index levels match every network's inputs.

    For each network, the dataframe's index names with `slicedim` re-inserted
    at its original position must equal the network's feature list.

    NOTE(review): the name reads inverted — this returns True when everything
    matches (i.e. when fast/"unsafe" evaluation is allowed); kept as-is since
    callers depend on it.

    Args:
        df: dataframe whose index levels are the non-sliced features.
        nns: dict of networks exposing `_feature_names` (pandas Series).
        slicedim (str): name of the feature that was unstacked out of the
            index.

    Returns:
        bool: True when all networks' features line up with the index.

    Raises:
        Exception: when the feature lists have different lengths, so the
            elementwise comparison is impossible.
    """
    unsafe = True
    for nn in nns.values():
        # Position of the slice dimension within this network's inputs.
        slicedim_idx = nn._feature_names[nn._feature_names == slicedim].index[0]
        varlist = list(df.index.names)
        varlist.insert(slicedim_idx, slicedim)
        try:
            if ~np.all(varlist == nn._feature_names):
                unsafe = False
        except ValueError:
            # Length mismatch between dataset features and network features.
            # (Fixed message: the second list is the *network's* features.)
            raise Exception('Dataset has features {!s} but network has features {!s}'.format(varlist, list(nn._feature_names)))
    return unsafe
def calculate_thresh1(x, feature, target, debug=False):
    """Estimate the instability threshold by linear extrapolation.

    Fits a line to (feature, target) for the points after the last zero of
    `target`, then returns the largest value of `x` where the fitted line is
    still negative.

    Args:
        x (ndarray): dense grid on which the fitted line is evaluated.
        feature (pd.Series): scanned input values.
        target (pd.Series): flux values; zeros mark the stable region.
        debug (bool): print a message when no threshold can be found.

    Returns:
        float: threshold estimate, or NaN when there is no zero crossing.
    """
    try:
        idx = target.index[target == 0][-1] #index of last zero
        # Points after the last zero, excluding NaNs (computed once instead
        # of twice as before).
        mask = (target.index > idx) & ~target.isnull()
        slope, intercept, r_value, p_value, std_err = stats.linregress(feature[mask], target[mask])
        thresh_pred = x * slope + intercept
        thresh1 = x[thresh_pred < 0][-1]
    except (ValueError, IndexError):
        # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
        thresh1 = np.nan
        if debug:
            print('No threshold1')
    return thresh1
def calculate_thresh2(feature, target, debug=False):
    """Estimate the threshold as the midpoint around the last zero of target.

    Takes the last zero of `target` and the first non-NaN point after it, and
    returns the mean of the corresponding `feature` values.

    Args:
        feature (ndarray): scanned input values (1-D).
        target (ndarray): flux values (1-D); zeros mark the stable region.
        debug (bool): print a message when no threshold can be found.

    Returns:
        float: threshold estimate, or NaN when there is no zero or no valid
        point after it.

    Raises:
        NotImplementedError: for multi-dimensional `target`.
    """
    if len(target.shape) > 1:
        raise NotImplementedError('2D threshold not implemented yet')
    try:
        idx = np.where(target == 0)[0][-1] #Only works for 1D
        # First non-NaN entry after the last zero.
        idx2 = np.where(~np.isnan(target[idx+1:]))[0][0] + idx + 1
        thresh2 = (feature[idx] + feature[idx2]) / 2
    except IndexError:
        # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
        thresh2 = np.nan
        if debug:
            print('No threshold2')
    return thresh2
#5.4 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) total
def process_chunk(target_names, chunck, settings=None, unsafe=False):
    """Run process_row over every row of a dataframe chunk.

    Args:
        target_names: target column names forwarded to process_row.
        chunck: dataframe whose rows (via iterrows) are processed.
        settings: settings dict forwarded to process_row.
        unsafe (bool): forwarded to process_row.

    Returns:
        list: one process_row result tuple per row, in order.
    """
    # Comprehension instead of enumerate+append: the index was never used.
    return [process_row(target_names, row, settings=settings, unsafe=unsafe)
            for row in chunck.iterrows()]
def process_row(target_names, row, ax1=None, unsafe=False, settings=None):
index, slice_ = row
feature = slice_.index.levels[1]
#target = slice.loc[target_names]
target = slice_.values[:len(feature) * len(target_names)].reshape(len(target_names), len(feature))
if np.all(np.logical_or(target == 0, np.isnan(target))):
return (1,)
else:
# 156 µs ± 10.4 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) (no zerocolors)
thresh_nn = np.empty(len(target_names) * len(nns))
thresh_nn_i = np.empty_like(thresh_nn, dtype='int64')
popbacks = np.empty_like(thresh_nn)
thresh1_misses = np.empty_like(thresh_nn)
thresh2_misses = np.empty_like(thresh_nn)
if settings['plot_zerocolors']:
maxgam = slice_['maxgam']
# Create slice, assume sorted
# 14.8 µs ± 1.27 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
x = np.linspace(feature.values[0],
feature.values[-1],
200)
#if plot:
if not ax1 and settings['plot']:
fig = plt.figure()
if settings['plot_pop'] and settings['plot_slice']:
gs = gridspec.GridSpec(2, 2, height_ratios=[10, 1], width_ratios=[5,1],
left=0.05, right=0.95, wspace=0.05, hspace=0.05)
ax2 = plt.subplot(gs[1,0])
ax3 = plt.subplot(gs[0,1])
if not settings['plot_pop'] and settings['plot_slice']:
gs = gridspec.GridSpec(2, 1, height_ratios=[10, 2], width_ratios=[1],
left=0.05, right=0.95, wspace=0.05, hspace=0.05)
ax2 = plt.subplot(gs[1,0])
if not settings['plot_pop'] and not settings['plot_slice']:
gs = gridspec.GridSpec(1, 1, height_ratios=[1], width_ratios=[1],
left=0.05, right=0.95, wspace=0.05, hspace=0.05)
ax1 = plt.subplot(gs[0,0])
#ax1.set_prop_cycle(cycler('color', ['#f1eef6','#d7b5d8','#df65b0','#dd1c77','#980043']))
# http://tristen.ca/hcl-picker/#/clh/5/273/2A0A75/D59FEB
#ax1.set_prop_cycle(cycler('color', ['#2A0A75','#6330B8','#9F63E2','#D59FEB']))
if len(nns) == 1:
color_range = | np.array([.7]) | numpy.array |
import sys
import operator
import pytest
import ctypes
import gc
import warnings
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
    """Check that two dtypes compare equal and hash to the same value."""
    assert_equal(a, b)
    msg = "two equivalent types do not hash to the same value !"
    assert_equal(hash(a), hash(b), msg)
def assert_dtype_not_equal(a, b):
    """Check that two dtypes compare unequal and have distinct hashes."""
    assert_(a != b)
    msg = "two different types hash to the same value !"
    assert_(hash(a) != hash(b), msg)
class TestBuiltin:
    # Smoke test over a spread of scalar and object types.
    @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
                                   np.compat.unicode])
    def test_run(self, t):
        """Only test hash runs at all."""
        # Constructing the dtype and hashing it must not raise.
        dt = np.dtype(t)
        hash(dt)
    @pytest.mark.parametrize('t', [int, float])
    def test_dtype(self, t):
        """Equivalent dtypes with different byte-order chars hash equal."""
        # Make sure equivalent byte order char hash the same (e.g. < and = on
        # little endian)
        dt = np.dtype(t)
        dt2 = dt.newbyteorder("<")
        dt3 = dt.newbyteorder(">")
        if dt == dt2:
            # Native order is little-endian: '=' and '<' describe the same
            # dtype but with different byteorder characters.
            assert_(dt.byteorder != dt2.byteorder, "bogus test")
            assert_dtype_equal(dt, dt2)
        else:
            # Native order is big-endian: compare against the '>' variant.
            assert_(dt.byteorder != dt3.byteorder, "bogus test")
            assert_dtype_equal(dt, dt3)
    def test_equivalent_dtype_hashing(self):
        """uintp must hash like the fixed-width unsigned type of equal size."""
        # Make sure equivalent dtypes with different type num hash equal
        uintp = np.dtype(np.uintp)
        if uintp.itemsize == 4:
            left = uintp
            right = np.dtype(np.uint32)
        else:
            # On this platform uintp is wider than 4 bytes; pair it with
            # the unsigned long long dtype instead.
            left = uintp
            right = np.dtype(np.ulonglong)
        assert_(left == right)
        assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
| assert_raises(TypeError, np.dtype, 'l8') | numpy.testing.assert_raises |
#! /usr/bin/env python
import logging
import os
import pickle
import numpy as np
from scipy import stats
from astropy import table
from astropy.io import fits
import ptemcee
# todo: don't hard code paths
mag_sun = table.Table.read(os.path.expanduser('~') + '/sluggs/sps_models/mag_sun.fits')
# how bright is the Sun in different filters and magnitide systems
def get_mag_sun(name, AB=True):
    """Return the Sun's magnitude in filter ``name``.

    Looks up the module-level ``mag_sun`` table; ``AB`` selects the AB
    system, otherwise Vega.
    """
    row = mag_sun[mag_sun['filter'] == name][0]
    system = 'AB' if AB else 'Vega'
    return row[system]
def prob_bi(theta, mags, metal, metal_ivar2, A_V, A_V_ivar2, A_V2, A_V2_ivar2):
    """Negative chi-square with a two-component extinction prior.

    theta = ([Z/H], age, log-mass, model A_V); `mags` holds
    (observed mag, inverse variance, reddening grid, magnitude grid) tuples.
    The two A_V measurements enter through a log-sum-exp mixture.
    """
    Z, age, logmass, A_V_model = theta[0], theta[1], theta[2], theta[3]
    chi2 = (metal - Z)**2 * metal_ivar2
    red = -(A_V - A_V_model)**2 * A_V_ivar2
    red_2 = -(A_V2 - A_V_model)**2 * A_V2_ivar2
    chi2 -= np.logaddexp(red, red_2)
    for mag, mag_ivars2, reddening_grid, grid in mags:
        chi2 += (mag - A_V_model * reddening_grid(Z, age)[0][0] - grid(Z, age)[0][0] + 2.5 * logmass)**2 * mag_ivars2
    return -chi2
def prob(theta, mags, metal, metal_ivar2, A_V, A_V_ivar2):
    """Negative chi-square with a single Gaussian extinction prior.

    theta = ([Z/H], age, log-mass, model A_V); `mags` holds
    (observed mag, inverse variance, reddening grid, magnitude grid) tuples.
    """
    Z, age, logmass, A_V_model = theta[0], theta[1], theta[2], theta[3]
    chi2 = (metal - Z)**2 * metal_ivar2
    chi2 += (A_V - A_V_model)**2 * A_V_ivar2
    for mag, mag_ivars2, reddening_grid, grid in mags:
        chi2 += (mag - A_V_model * reddening_grid(Z, age)[0][0] - grid(Z, age)[0][0] + 2.5 * logmass)**2 * mag_ivars2
    return -chi2
def prior(theta, metal_lower, metal_upper, age_lower, age_upper, A_V_lower, A_V_upper):
    """Flat log-prior: 0 inside the parameter box, -inf outside.

    theta = ([Z/H], age, log-mass, A_V); the mass (theta[2]) is unbounded.
    """
    # Same strict > / < comparisons as before, so boundary values pass.
    bounds = ((theta[0], metal_lower, metal_upper),
              (theta[1], age_lower, age_upper),
              (theta[3], A_V_lower, A_V_upper))
    for value, lower, upper in bounds:
        if value > upper or value < lower:
            return -np.inf
    return 0.
class ln_prior:
    """Picklable callable that evaluates the flat prior with frozen bounds.

    A class (rather than a closure) so it survives multiprocessing pickling.
    """
    def __init__(self, metal_lower=-3.0, metal_upper=0.7, age_lower=0.1,
                 age_upper=15.84, A_V_lower=0., A_V_upper=np.inf):
        # Box edges forwarded to `prior` on every call.
        self.metal_lower = metal_lower
        self.metal_upper = metal_upper
        self.age_lower = age_lower
        self.age_upper = age_upper
        self.A_V_lower = A_V_lower
        self.A_V_upper = A_V_upper

    def __call__(self, x):
        return prior(x,
                     self.metal_lower, self.metal_upper,
                     self.age_lower, self.age_upper,
                     self.A_V_lower, self.A_V_upper)
class ln_prob_bi:
    """Picklable likelihood with a two-component extinction prior.

    Precomputes the inverse variances so `prob_bi` receives ready-to-use
    weights on every call.
    """
    def __init__(self, mags, metal, metal_e, A_V, A_V_e, A_V2, A_V2_e):
        self.mags = mags
        self.metal = metal
        self.metal_ivar2 = metal_e**-2.
        self.A_V = A_V
        self.A_V_ivar2 = A_V_e**-2.
        self.A_V2 = A_V2
        self.A_V2_ivar2 = A_V2_e**-2.

    def __call__(self, x):
        return prob_bi(x, self.mags, self.metal, self.metal_ivar2,
                       self.A_V, self.A_V_ivar2, self.A_V2, self.A_V2_ivar2)
class ln_prob:
    """Picklable likelihood with a single Gaussian extinction prior.

    Precomputes the inverse variances so `prob` receives ready-to-use
    weights on every call.
    """
    def __init__(self, mags, metal, metal_e, A_V, A_V_e):
        self.mags = mags
        self.metal = metal
        self.metal_ivar2 = metal_e**-2.
        self.A_V = A_V
        self.A_V_ivar2 = A_V_e**-2.

    def __call__(self, x):
        return prob(x, self.mags, self.metal, self.metal_ivar2,
                    self.A_V, self.A_V_ivar2)
# takes a list of magnitudes, and samples the posterior distributions of metallicity,
# age, mass and extinction subject to the Gaussian priors on metallicity and extinction
def calc_age_mass(magnitudes, metal, metal_e, A_V, A_V_e, grids=None,
                  reddening_grids=None, plot=False, nwalkers=1000, steps=500, thin=10,
                  keep_chain=False, threads=4, metal_lower=-3.0, metal_upper=0.7,
                  age_lower=0.001, age_upper=15.84, A_V_lower=0., A_V_upper=np.inf, A_V2=0,
                  A_V2_e=0, ntemps=8, nburn=500, logger=None):
    """Sample metallicity, age, mass and extinction posteriors with ptemcee.

    Takes a list of (filter, mag, err) magnitudes plus Gaussian priors on
    [Z/H] and A_V, grid-searches a starting point, then runs a parallel-
    tempered MCMC and returns thinned posterior samples with percentile
    summaries. A second extinction prior component is used when A_V2_e is
    non-zero.
    """
    if logger is None:
        logger = logging.getLogger()
    # Log the inputs and prior box for reproducibility.
    input_str = 'Input:\n'
    if metal is not None and metal_e is not None:
        input_str += '[Z/H] {:.3f} {:.3f}'.format(metal, metal_e)
    input_str += ' A_V {:.3f} {:.3f}'.format(A_V, A_V_e)
    if A_V2_e:
        input_str += ' {:.3f} {:.3f}'.format(A_V2, A_V2_e)
    input_str += '\n'
    input_str += magnitude_str(magnitudes)
    input_str += '\nPriors:\n'
    input_str += '{:.3f} < [Z/H] < {:.3f}\n'.format(metal_lower, metal_upper)
    input_str += '{:.3f} < Age < {:.3f}\n'.format(age_lower, age_upper)
    input_str += '{:.3f} < A_V < {:.3f}\n'.format(A_V_lower, A_V_upper)
    logger.info(input_str)
    # I should not hard code paths like this
    if grids is None:
        with open(os.path.expanduser('~') + '/sluggs/sps_models/fsps_mist_inter_mags.pickle', 'rb') as f:
            grids = pickle.load(f)
    if reddening_grids is None:
        with open(os.path.expanduser('~') + '/sluggs/sps_models/fsps_reddening_mist_inter_mags.pickle', 'rb') as f:
            reddening_grids = pickle.load(f)
    # No metallicity measurement: fall back to a very broad prior.
    if metal is None or metal_e is None:
        metal = -1
        metal_e = 10
    mags = get_mags(magnitudes, reddening_grids, grids)
    # Coarse grid search for a sensible MCMC starting point.
    metal_guess, age_guess, mass_guess, A_V_guess = grid_search(mags, metal,
        metal_e, A_V, A_V_e, A_V2, A_V2_e, age_lower, age_upper, metal_lower,
        metal_upper, A_V_lower, A_V_upper, logger)
    def start_array(guess, nwalkers, lower, upper):
        # Small Gaussian ball around the guess, clamped to the prior box.
        start = guess + 1e-2 * np.random.randn(nwalkers)
        start[start < lower] = lower
        start[start > upper] = upper
        return start
    start = [start_array(metal_guess, nwalkers, metal_lower, metal_upper),
             start_array(age_guess, nwalkers, age_lower, age_upper),
             start_array(mass_guess, nwalkers, -np.inf, np.inf),
             start_array(A_V_guess, nwalkers, A_V_lower, A_V_upper)]
    start = np.asarray(start).T
    # Choose single- or two-component extinction likelihood.
    if A_V2_e:
        logl = ln_prob_bi(mags, metal, metal_e, A_V, A_V_e, A_V2, A_V2_e)
    else:
        logl = ln_prob(mags, metal, metal_e, A_V, A_V_e)
    logprior = ln_prior(metal_lower, metal_upper, age_lower,
                        age_upper, A_V_lower, A_V_upper)
    log_likely = logl([metal_guess, age_guess, mass_guess, A_V_guess])
    start_str = 'Starting at:\n{:.3f} {:.3f} {:.3f} {:.3f}\nStarting log likelihood {:.3f}\n'.format(metal_guess,
        age_guess, mass_guess, A_V_guess, log_likely)
    logger.info(start_str)
    sampler = ptemcee.Sampler(nwalkers, start.shape[-1], logl, logprior,
                              threads=threads, ntemps=ntemps)
    # Replicate the walker positions across all temperature rungs.
    temp_start = []
    for i in range(ntemps):
        temp_start.append(start)
    temp_start = np.array(temp_start)
    sampler.run_mcmc(temp_start, (nburn + steps))
    # Keep only the coldest chain, drop burn-in, flatten and thin.
    samples = sampler.chain[0, :, nburn:, :].reshape((-1, start.shape[-1]))
    samples = samples[::thin]
    if threads > 1:
        sampler.pool.close()
    if keep_chain:
        # NOTE(review): np.save writes binary — mode 'w' should presumably
        # be 'wb' (or pass the filename directly); confirm on Python 3.
        np.save(open(str(keep_chain) + '_chain.npy', 'w'), np.asarray(sampler.chain))
    # Percentiles at -2/-1/0/+1/+2 sigma of a normal distribution.
    norm_percentiles = stats.norm.cdf([-2, -1, 0, 1, 2]) * 100
    Z_precentiles = np.percentile(samples[:,0], norm_percentiles)
    age_precentiles = np.percentile(samples[:,1], norm_percentiles)
    mass_precentiles = np.percentile(samples[:,2], norm_percentiles)
    A_V_precentiles = np.percentile(samples[:,3], norm_percentiles)
    output_str = 'Output:\n'
    output_str += '[Z/H] ' + ' '.join(['{:.3f}'.format(Z) for Z in Z_precentiles]) + '\n'
    output_str += ' {:.3f} \u00B1{:.3f}'.format(np.mean(samples[:,0]), np.std(samples[:,0])) + '\n'
    output_str += 'age ' + ' '.join(['{:.3f}'.format(age) for age in age_precentiles]) + '\n'
    output_str += ' {:.3f} \u00B1{:.3f}'.format(np.mean(samples[:,1]), np.std(samples[:,1])) + '\n'
    output_str += 'mass ' + ' '.join(['{:.3f}'.format(mass) for mass in mass_precentiles]) + '\n'
    output_str += ' {:.3f} \u00B1{:.3f}'.format(np.mean(samples[:,2]), np.std(samples[:,2])) + '\n'
    output_str += 'A_V ' + ' '.join(['{:.3f}'.format(red) for red in A_V_precentiles]) + '\n'
    output_str += ' {:.3f} \u00B1{:.3f}'.format(np.mean(samples[:,3]), np.std(samples[:,3])) + '\n'
    log_likely = logl([Z_precentiles[2], age_precentiles[2], mass_precentiles[2], A_V_precentiles[2]])
    output_str += 'Log likelihood: {:.3f}\n'.format(log_likely)
    logger.info(output_str)
    if plot:
        import corner
        corner.corner(samples, labels=['[Z/H]', 'Age', 'Log Mass', 'A_V'], quantiles=[0.16, 0.50, 0.84], show_titles=True)
    return samples, Z_precentiles[1:-1], age_precentiles[1:-1], mass_precentiles, A_V_precentiles[1:-1]
def get_mags(magnitudes, reddening_grids, grids):
    """Pair each observed magnitude with its weight and model grids.

    Args:
        magnitudes: iterable of (filter_name, mag, mag_err) triples.
        reddening_grids: dict of filter name -> reddening interpolation grid.
        grids: dict of filter name -> magnitude interpolation grid.

    Returns:
        list: (mag, mag_err**-2, reddening_grid, grid) tuples, one per input
        magnitude, in order.
    """
    # Comprehension instead of the previous append loop.
    return [(mag, mag_e**-2., reddening_grids[name], grids[name])
            for name, mag, mag_e in magnitudes]
def magnitude_str(magnitudes):
    """Format (name, mag, err) triples as 'name m.mmm e.eee ' segments.

    Each entry keeps its trailing space, matching the historical output.
    Uses str.join instead of repeated += (which is quadratic).
    """
    return ''.join('{} {:.3f} {:.3f} '.format(*magnitude)
                   for magnitude in magnitudes)
def load_samples(catalogue, directory='.', verbose=False):
    """Read the posterior sample array for every catalogue entry and stack them.

    Each entry's 'output' field names a FITS file whose second extension
    holds the samples; all arrays are vertically stacked into one.
    """
    collected = []
    for entry in catalogue:
        path = os.path.join(directory, entry['output'] + '.fits')
        with fits.open(path) as hdul:
            sample = hdul[2].data
        if verbose:
            print(entry['output'], sample.shape)
        collected.append(sample)
    return np.vstack(collected)
def corner_array(array, names):
    """Stack the named record-array columns into a plain 2-D array (rows = samples)."""
    columns = [array[name] for name in names]
    return np.asarray(columns).T
def plot_posterior(filename, verbose=False):
    """Corner-plot the posterior samples stored in a results FITS file."""
    import corner
    with fits.open(filename) as hdul:
        entry = table.Table.read(hdul[1])
        sample = hdul[2].data
    if verbose:
        print(entry[0])
    axis_labels = ['[Z/H]', 'age', 'mass', 'A_V']
    corner.corner(corner_array(sample, ['Z', 'age', 'mass', 'A_V']),
                  labels=axis_labels,
                  quantiles=[0.02, 0.16, 0.5, 0.84, 0.98],
                  show_titles=True)
def plot_samples(samples, catalogue, name, metal_range=(-2.4, 0.7), age_range=(0, 15.8), bins=100):
import matplotlib.pyplot as plt
from matplotlib import colors
h = | np.histogram2d(samples['age'], samples['Z'], bins=bins, range=(age_range, metal_range)) | numpy.histogram2d |
from scipy.io import netcdf_file
import numpy as np
from datetime import datetime
from scipy.interpolate import griddata
def grepValuesByMask(xi, data, mask):
"""
this grabs the values from data from entries with positive mask and interpolates it to numpy meshgrid xi
"""
X=data.getX()
x=[]
y=[]
z=[]
values=[]
for i in range(mask.getNumberOfDataPoints()):
if mask.getTupleForDataPoint(i)[0] > 0:
x.append(X.getTupleForDataPoint(i)[0])
y.append(X.getTupleForDataPoint(i)[1])
z.append(X.getTupleForDataPoint(i)[2])
values.append(data.getTupleForDataPoint(i)[0])
if len(xi) == 2:
r=griddata((np.array(x), np.array(y)), np.array(values), tuple(xi), method='linear', fill_value=np.nan, rescale=False)
else:
r=griddata((np.array(x), np.array(y), | np.array(z) | numpy.array |
from __future__ import division, print_function, absolute_import
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from .filters import *
from .helpers import fminbound, fmin, newton
from mpmath import ellipk, ellipfun, besselk
import numpy as np
__all__ = ['iirfilter', 'cheb1ord', 'cheb2ord', 'buttord']
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
# elif typefunc == besselap:
# z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
# elif typefunc == ellipap:
# if rs is None or rp is None:
# raise ValueError("Both rp and rs must be provided to design an "
# "elliptic filter.")
# z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
warped = 2 * fs * | tan(pi * Wn / fs) | numpy.tan |
'''
Created on Nov 1, 2016
@author: <NAME> and <NAME>
'''
import logging
import pickle
import pandas as pd
import datetime
import os, subprocess
import sklearn.metrics as skm
import numpy as np
import taggers.lample_lstm_tagger.lstm_wrapper as lstm_wrapper
from baselines.dawid_and_skene import ds, ibccvb
from evaluation.metrics import calculate_scores
from evaluation.plots import SCORE_NAMES
from baselines.hmmcrowd import HMM_crowd
from baselines.util import crowd_data, data_to_hmm_crowd_format, subset_hmm_crowd_data
from baselines import ibcc, clustering, majority_voting
from bsc import bsc
logging.basicConfig(level=logging.DEBUG)
data_root_dir = '../../../data/bayesian_sequence_combination/'
def _append_to_csv(outputdir, method, method_idx, new_data, filename, file_identifier):
filename = os.path.join(outputdir, '%s_%s.csv' % (filename, file_identifier))
new_data = pd.DataFrame(new_data[:, method_idx], columns=[str(method).strip('[]')])
if os.path.isfile(filename):
data = pd.read_csv(filename, delimiter=',')
expanded_data = pd.concat((data, new_data), axis=1)
else:
expanded_data = new_data
expanded_data.to_csv(filename, index=False)
class Experiment(object):
def __init__(self, outputdir, nclasses, annotations, gold, doc_start, text,
             annos_val=None, gold_val=None, doc_start_val=None, text_val=None,
             gold_nocrowd=None, doc_start_nocrowd=None, text_nocrowd=None,
             alpha0_factor=1.0, alpha0_diags=1.0, beta0_factor=0.1, begin_factor=1.0,
             max_iter=20, crf_probs=False, rep=0, bootstrapping=False
             ):
    '''
    :param outputdir: the directory where results, predictions and any model files will be stored
    :param annotations: rows correspond to tokens, columns to annotators.
    :param gold: class labels for computing the performance metrics. Missing values should be set to -1.
    :param doc_start: binary vector indicating the start of each sequence/document/sentence.
    :param text: strings associated with each token.
    :param annos_val: crowdsourced labels for a validation set.
    :param gold_val: for tuning hyperparameters
    :param doc_start_val:
    :param text_val:
    :param gold_nocrowd: for evaluating on a second set of data points where crowd labels are not available
    :param doc_start_nocrowd:
    :param text_nocrowd: the features for testing the model trained on crowdsourced data to classify data with no labels at all
    :param nclasses: number of classes
    :param alpha0_factor: smoothing hyperparameter for annotator models.
    :param alpha0_diags: correctness bias hyperparameter for annotator models.
    :param beta0_factor: smoothing hyperparameter for prior over class labels.
    :param begin_factor: additional correctness bias for B tokens.
    :param max_iter: maximum iterations for iterative aggregation methods.
    :param crf_probs: LSTM method produces probabilities using a CRF output layer.
    :param rep: repetition number for an experiment that is carried out multiple times; sets a different random seed for each repetition.
    :param bootstrapping: calculate performance metrics using bootstrapping for small datasets
    '''
    # list of aggregation method names; must be set by the caller before run_methods()
    self.methods = None
    self.num_classes = nclasses
    self.postprocess = False  # previous papers did not use this so we leave out to make results comparable.
    self.random_sampling = False
    self.outputdir = outputdir
    if not os.path.exists(outputdir):
        os.mkdir(outputdir)
    outputdir += '/'
    # Data -------------------------------------------------
    self.annos_test = annotations
    self.gold_test = gold
    self.doc_start_test = doc_start
    self.text_test = text
    # optional validation split used for hyperparameter tuning / LSTM early stopping
    self.annos_val = annos_val
    self.gold_val = gold_val
    self.doc_start_val = doc_start_val
    self.text_val = text_val
    # optional second evaluation set that has gold labels but no crowd labels
    self.gold_nocrowd = gold_nocrowd
    self.doc_start_nocrowd = doc_start_nocrowd
    self.text_nocrowd = text_nocrowd
    # Method hyperparameters and settings ---------------------------------
    self.crf_probs = crf_probs
    self.opt_hyper = False  # if True, methods that support it optimise their own hyperparameters
    self.use_lb = False
    self.alpha0_factor = alpha0_factor
    self.alpha0_diags = alpha0_diags
    self.begin_factor = begin_factor
    self.beta0_factor = beta0_factor
    self.max_internal_iter = 3  # internal iterations (e.g. LSTM epochs inside integrated methods)
    self.max_iter = max_iter # allow all methods to use a maximum no. iterations
    # fixed master seed so each repetition index maps to a reproducible AL seed
    np.random.seed(3849)
    self.seed = np.random.randint(1, 1000, 100)[rep] # seeds for AL
    # Results -------------------------------------------------------
    # save results from methods here. If we use compound methods, we can reuse these results in different
    # combinations of methods.
    self.aggs = {}
    self.probs = {}
    self.bootstrapping = bootstrapping
    self.scores = None
    self.scores_nocrowd = None
def tune_alpha0(self, alpha0diag_proposals, alpha0factor_proposals, beta0factor_proposals,
                method, metric_idx_to_optimise=8, new_data=False):
    """Grid-search the three prior hyperparameters for a single method.

    Runs *method* once per (beta0, alpha0_diag, alpha0_factor) combination,
    records the chosen metric for each setting, and saves the score grid to
    CSV after every run.

    :param alpha0diag_proposals: candidate values for self.alpha0_diags.
    :param alpha0factor_proposals: candidate values for self.alpha0_factor.
    :param beta0factor_proposals: candidate values for self.beta0_factor.
    :param method: name of the aggregation method to tune.
    :param metric_idx_to_optimise: row of the score matrix to optimise;
        index 5 is treated as an error metric and minimised, others maximised.
    :param new_data: passed through to run_methods to overwrite cached data.
    :return: array [best score, best h, best i, best j] of indices into the
        three proposal lists.
    """
    self.methods = [method]
    # one row per (beta0, alpha0_diag) pair, one column per alpha0_factor
    scores = np.zeros((len(beta0factor_proposals) * len(alpha0diag_proposals), len(alpha0factor_proposals)))
    best_scores = np.zeros(4) - np.inf
    best_idxs = np.zeros(4)
    for h, beta0factor in enumerate(beta0factor_proposals):
        self.beta0_factor = beta0factor
        for i, alpha0diag in enumerate(alpha0diag_proposals):
            self.alpha0_diags = alpha0diag
            for j, alpha0factor in enumerate(alpha0factor_proposals):
                self.alpha0_factor = alpha0factor
                # reset saved data so that models are run again.
                self.aggs = {}
                self.probs = {}
                outputdir_ij = self.outputdir + ('_%i_%i_%i_' % (h, i, j)) + method + '/'
                # NOTE(review): run_methods() has no output-directory parameter, so
                # outputdir_ij is bound to test_on_dev here; presumably
                # self.outputdir should be set instead -- confirm against history.
                all_scores, _, _, _, _, _ = self.run_methods(outputdir_ij, new_data=new_data)
                if metric_idx_to_optimise != 5: # maximise these scores
                    scores[(h*len(alpha0diag_proposals)) + i, j] = all_scores[metric_idx_to_optimise, :]
                else: # minimise this score
                    scores[(h*len(alpha0diag_proposals)) + i, j] = -all_scores[metric_idx_to_optimise, :]
                print('Scores for %f, %f, %f: %f' % (beta0factor, alpha0diag, alpha0factor,
                                                     scores[(h*len(alpha0diag_proposals)) + i, j]))
                if scores[(h*len(alpha0diag_proposals)) + i, j] > best_scores[0]:
                    # record the winning score and hyperparameter values...
                    best_scores[0] = scores[(h*len(alpha0diag_proposals)) + i, j]
                    best_scores[1] = beta0factor
                    best_scores[2] = alpha0diag
                    best_scores[3] = alpha0factor
                    # ...and their grid indices
                    best_idxs[0] = scores[(h*len(alpha0diag_proposals)) + i, j]
                    best_idxs[1] = h
                    best_idxs[2] = i
                    best_idxs[3] = j
                print('Saving scores for this setting to %s' % (self.outputdir + '/%s_scores.csv' % method))
                np.savetxt(self.outputdir + '/%s_scores.csv' % method, scores, fmt='%s', delimiter=',',
                           header=str(self.methods).strip('[]'))
                np.savetxt(self.outputdir + '/%s_bestscores.csv' % method, best_scores, fmt='%s', delimiter=',',
                           header=str(self.methods).strip('[]'))
    # clear cached results so later runs do not reuse the tuning outputs
    self.aggs = {}
    self.probs = {}
    return best_idxs
def run_methods(self, test_on_dev=False, new_data=False,
                active_learning=False, AL_batch_fraction=0.05, max_AL_iters=10,
                save_with_timestamp=True):
    '''
    Run the aggregation methods and evaluate them.
    :param test_on_dev: if True, aggregate the dev/validation set instead of the test set
    :param new_data: set to true if all cached data files should be overwritten
    :param active_learning: set to true to run an AL simulation
    :param AL_batch_fraction: what proportion of labels are sampled at each AL iteration
    :param max_AL_iters: maximum number of AL rounds.
    :param save_with_timestamp: include a timestamp and AL progress counter in result file names
    :return: (scores, preds, probs, scores_nocrowd, preds_nocrowd, probs_nocrowd)
    '''
    if active_learning:
        print('Running active learning on the test dataset.')
    # select the data split that the methods will aggregate
    if not test_on_dev:
        self.annos_all = self.annos_test
        self.annos = self.annos_test
        self.doc_start_all = self.doc_start_test
        self.doc_start = self.doc_start_test
        self.text_all = self.text_test
        self.text = self.text_test
        self.gold = self.gold_test
    else:
        self.annos_all = self.annos_val
        self.annos = self.annos_val
        self.doc_start_all = self.doc_start_val
        self.doc_start = self.doc_start_val
        self.text_all = self.text_val
        self.text = self.text_val
        self.gold = self.gold_val
    # a second test set with no crowd labels was supplied directly
    if self.gold_nocrowd is not None and self.text_nocrowd is not None and self.doc_start_nocrowd is not None:
        self.N_nocrowd = self.gold_nocrowd.shape[0]
    else:
        self.N_nocrowd = 0
    print('Running experiment on %s set%s' % ('dev' if test_on_dev else 'test', '.' if self.N_nocrowd==0 else
          ' and predicting on additional test data with no crowd labels'))
    Ntoks = self.annos.shape[0]
    Ndocs = np.sum(self.doc_start)
    # get total annotation count
    # NOTE(review): this counts entries only at doc-start tokens, i.e. one per
    # document per annotator -- confirm that is the intended AL budget unit.
    Nannos = np.sum(self.annos_all[(self.doc_start == 1).flatten(), :] != -1)
    print('Data set: %i documents, %i tokens, %i documents without crowd labels.' % (Ndocs, Ntoks, self.N_nocrowd))
    # per-method outputs; -1 marks slots never filled in
    preds = -np.ones((Ntoks, len(self.methods)))
    probs_allmethods = -np.ones((Ntoks, self.num_classes, len(self.methods)))
    preds_nocrowd = -np.ones((self.N_nocrowd, len(self.methods)))
    probs_allmethods_nocrowd = -np.ones((self.N_nocrowd, self.num_classes, len(self.methods)))
    # timestamp for when we started a run. Can be compared to file versions to check what was run.
    timestamp = datetime.datetime.now().strftime('started-%Y-%m-%d-%H-%M-%S')
    for method_idx, method in enumerate(self.methods):
        print('Running method: %s' % method)
        Nseen = 0
        niter = 0
        if active_learning:
            # get the number of labels to select each iteration
            self.batch_size = int(np.ceil(AL_batch_fraction * Nannos))
            np.random.seed(self.seed) # for repeating with different methods with same initial set
            self.annos = None
            # initial uniform-uncertainty sample to seed the AL loop
            selected_docs, selected_toks, nselected_by_doc = self._uncertainty_sampling(
                np.ones(Ndocs, dtype=float) / self.num_classes, None, None)
        else:
            selected_docs = None
            nselected_by_doc = None
        # without AL this loop runs exactly once (Nseen reaches Nannos after one pass)
        while Nseen < Nannos and niter < max_AL_iters:
            print('Learning round %i' % niter)
            # the active learning loop
            if active_learning: # treat the unseen instances similarly to the no crowd instances -- get their posterior probabilities
                unseen_docs = np.arange(Ndocs)
                unseen_docs = unseen_docs[np.invert(np.in1d(unseen_docs, selected_docs))]
                # map unseen document indices back to their token positions
                # NOTE(review): uses doc_start_test even in dev mode -- confirm.
                unselected_toks = np.argwhere(np.in1d(np.cumsum(self.doc_start_test) - 1, unseen_docs)).flatten()
                self.N_unseentoks = len(unselected_toks)
                doc_start_unseen = self.doc_start_test[unselected_toks]
                text_unseen = self.text_test[unselected_toks]
            else:
                self.N_unseentoks = 0
                doc_start_unseen = None
                text_unseen = None
            agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen = \
                self._run_method(method, timestamp, doc_start_unseen, text_unseen, selected_docs, nselected_by_doc,
                                 new_data if niter==0 else False)
            if np.any(self.gold != -1): # don't run this in the case that crowd-labelled data has no gold labels
                if active_learning and len(agg) < len(self.gold):
                    # stitch the AL-selected and unseen predictions back into full-length arrays
                    agg_all = np.ones(len(self.gold))
                    agg_all[selected_toks] = agg.flatten()
                    agg_all[unselected_toks] = agg_unseen.flatten()
                    agg = agg_all
                    probs_all = np.zeros((len(self.gold), self.num_classes))
                    probs_all[selected_toks, :] = probs
                    probs_all[unselected_toks, :] = probs_unseen
                    probs = probs_all
                self.calculate_experiment_scores(agg, probs, method_idx)
            preds[:, method_idx] = agg.flatten()
            probs_allmethods[:,:,method_idx] = probs
            if self.N_nocrowd > 0:
                self.calculate_nocrowd_scores(agg_nocrowd, probs_nocrowd, method_idx)
                preds_nocrowd[:, method_idx] = agg_nocrowd.flatten()
                probs_allmethods_nocrowd[:,:,method_idx] = probs_nocrowd
            print('...done')
            # Save the results so far after each method has completed.
            Nseen = np.sum(self.annos[self.doc_start.flatten()==1] != -1) # update the number of documents processed so far
            print('Nseen = %i' % Nseen)
            # change the timestamps to include AL loop numbers
            if save_with_timestamp:
                file_id = timestamp + ('-Nseen%i' % Nseen)
            else:
                file_id = ''
            _append_to_csv(self.outputdir, method, method_idx, self.scores, 'result', file_id)
            _append_to_csv(self.outputdir, method, method_idx, self.score_std, 'result_std', file_id)
            _append_to_csv(self.outputdir, method, method_idx, preds, 'pred', file_id)
            if self.N_nocrowd > 0:
                _append_to_csv(self.outputdir, method, method_idx, self.scores_nocrowd, 'result_nocrowd', file_id)
                _append_to_csv(self.outputdir, method, method_idx, self.score_std_nocrowd, 'result_std_nocrowd', file_id)
                _append_to_csv(self.outputdir, method, method_idx, preds_nocrowd, 'pred_nocrowd', file_id)
            # probabilities are too large for CSV -- pickle them instead
            with open(os.path.join(self.outputdir, 'probs_%s.pkl' % file_id), 'wb') as fh:
                pickle.dump(probs_allmethods, fh)
            if self.N_nocrowd > 0:
                with open(os.path.join(self.outputdir, 'probs_nocrowd_%s.pkl' % file_id), 'wb') as fh:
                    pickle.dump(probs_allmethods_nocrowd, fh)
            # if model is not None and not active_learning and return_model:
            #     with open(self.outputdir + 'model_%s.pkl' % method, 'wb') as fh:
            #         pickle.dump(model, fh)
            if active_learning and Nseen < Nannos:
                # non-sequential methods just provide independent label probabilities.
                if most_likely_seq_probs is None:
                    most_likely_seq_probs = [np.prod(seq_prob) for seq_prob in np.split(probs, self.doc_start.flatten())]
                selected_docs, selected_toks, nselected_by_doc = self._uncertainty_sampling(
                    most_likely_seq_probs, selected_toks, selected_docs)
                print('**** Active learning: No. annos = %i' % self.annos.shape[0])
            else:
                selected_docs = None
            niter += 1
    return self.scores, preds, probs_allmethods, \
           self.scores_nocrowd, preds_nocrowd, probs_allmethods_nocrowd
def _run_method(self, method, timestamp, doc_start_unseen, text_unseen, selected_docs, nselected_by_doc, new_data):
    """Dispatch a single aggregation method by its name prefix.

    :return: (agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd,
        agg_unseen, probs_unseen); entries a method does not produce keep the
        defaults initialised below.
    """
    # Initialise the results because not all methods will fill these in
    most_likely_seq_probs = None # some methods can compute this
    agg_unseen = np.zeros(self.N_unseentoks)
    # default maximum entropy situation
    probs_unseen = np.ones((self.N_unseentoks, self.num_classes), dtype=float) / self.num_classes
    agg_nocrowd = np.zeros(self.N_nocrowd)
    probs_nocrowd = np.ones((self.N_nocrowd, self.num_classes))
    # NOTE(review): an unrecognised method name leaves agg/probs unassigned and
    # raises UnboundLocalError at the end -- confirm callers validate names.
    if method.split('_')[0] == 'best':
        agg, probs = self._run_best_worker()
    elif method.split('_')[0] == 'worst':
        agg, probs = self._run_worst_worker()
    elif method.split('_')[0] == 'clustering':
        agg, probs = self._run_clustering()
    elif method.split('_')[0] == 'majority':
        agg, probs = majority_voting.MajorityVoting(self.annos, self.num_classes).vote()
    elif method.split('_')[0] == 'mace':
        agg, probs = self._run_mace(timestamp)
    elif method.split('_')[0] == 'ds':
        agg, probs = self._run_ds()
    elif method.split('_')[0] == 'ibcc':
        agg, probs = self._run_ibcc()
    elif method.split('_')[0] == 'ibcc2': # this is the newer and simpler implementation
        agg, probs = self._run_ibcc2()
    elif method.split('_')[0] == 'bsc' or method.split('_')[0] == 'bac':
        if method not in self.aggs:
            agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen \
                = self._run_bsc(
                method,
                doc_start_unseen=doc_start_unseen,
                text_unseen=text_unseen
            )
        else:
            # reuse the cached results of the base BSC run for the _thenLSTM variant
            agg = self.aggs[method.replace('_thenLSTM', '')]
            probs = self.probs[method.replace('_thenLSTM', '')]
    elif 'HMM_crowd' in method:
        if 'HMM_crowd' not in self.aggs:
            # we pass all annos here so they can be saved and reloaded from a single file in HMMCrowd
            # format, then the relevant subset selected from that.
            agg, probs, model = self._run_hmmcrowd(selected_docs, nselected_by_doc, overwrite_data_file=new_data)
            most_likely_seq_probs = model.res_prob
        else:
            agg = self.aggs['HMM_crowd']
            probs = self.probs['HMM_crowd']
    elif 'gt' in method:
        # oracle: use the gold labels directly, with one-hot probabilities
        agg = self.gold.flatten()
        probs = np.zeros((len(self.gold), self.num_classes))
        probs[range(len(agg)), agg.astype(int)] = 1.0
    if '_thenLSTM' in method:
        # second stage: train an LSTM on the aggregated labels and use its predictions
        agg, probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen = self._run_LSTM(
            agg, timestamp, doc_start_unseen, text_unseen)
    # cache for reuse by compound methods
    self.aggs[method] = agg
    self.probs[method] = probs
    return agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen
def calculate_experiment_scores(self, agg, probs, method_idx):
    """Evaluate one method on the crowd-labelled set and store the metrics in
    column *method_idx* of self.scores / self.score_std."""
    if self.scores is None:
        # lazily allocate one column per method; the last 3 SCORE_NAMES carry no std
        self.scores = np.zeros((len(SCORE_NAMES), len(self.methods)))
        self.score_std = np.zeros((len(SCORE_NAMES) - 3, len(self.methods)))
    self.scores[:, method_idx][:, None], self.score_std[:, method_idx] = calculate_scores(
        self.postprocess, agg, self.gold.flatten(), probs, self.doc_start_all,
        self.bootstrapping, print_per_class_results=True)
def calculate_nocrowd_scores(self, agg, probs, method_idx):
    """Evaluate one method on the second test set that has no crowd labels,
    storing the metrics in column *method_idx* of self.scores_nocrowd."""
    if self.scores_nocrowd is None:
        # for the additional test set with no crowd labels
        self.scores_nocrowd = np.zeros((len(SCORE_NAMES), len(self.methods)))
        self.score_std_nocrowd = np.zeros((len(SCORE_NAMES) - 3, len(self.methods)))
    self.scores_nocrowd[:,method_idx][:,None], self.score_std_nocrowd[:, method_idx] = calculate_scores(
        self.postprocess, agg, self.gold_nocrowd.flatten(), probs, self.doc_start_nocrowd,
        self.bootstrapping, print_per_class_results=True)
# Methods -----------------------------------------------------------------
def _run_best_worker(self):
    """Oracle baseline: for each token, take the label given by the worker
    with the highest macro-averaged F1 score against the gold labels.

    :return: tuple of (aggregated labels, one-hot probabilities).
    """
    # choose the best classifier by f1-score
    f1scores = np.zeros_like(self.annos) - 1.0  # -1 marks tokens a worker did not label
    print('F1 scores for individual workers:')
    individual_scores = []
    for w in range(self.annos.shape[1]):
        valididxs = self.annos[:, w] != -1
        if not np.any(valididxs):
            continue
        f1_by_class = skm.f1_score(self.gold.flatten()[valididxs], self.annos[valididxs, w], labels=range(self.num_classes),
                                   average=None)
        # average only over classes actually present in this worker's gold subset
        f1_w = np.mean(f1_by_class[np.unique(self.gold[valididxs]).astype(int)])
        #print(f1_w)
        individual_scores.append(f1_w)
        f1scores[valididxs, w] = f1_w
    #print(sorted(individual_scores))
    # per token, pick the labelling worker with the best score
    best_idxs = np.argmax(f1scores, axis=1)
    agg = self.annos[np.arange(self.annos.shape[0]), best_idxs]
    # one-hot probabilities for the chosen labels
    probs = np.zeros((self.gold.shape[0], self.num_classes))
    for k in range(self.gold.shape[0]):
        probs[k, int(agg[k])] = 1
    return agg, probs
def _run_worst_worker(self):
    """Baseline: for each token, take the label given by the worker with the
    lowest macro-averaged F1 score against the gold labels.

    :return: tuple of (aggregated labels, one-hot probabilities).
    """
    ntoks, nworkers = self.annos.shape
    # score of the labelling worker at each token; +inf where a worker gave no label
    f1scores = np.zeros_like(self.annos) + np.inf
    gold_flat = self.gold.flatten()
    for worker in range(nworkers):
        labelled = self.annos[:, worker] != -1
        if not np.any(labelled):
            continue
        per_class_f1 = skm.f1_score(gold_flat[labelled], self.annos[labelled, worker],
                                    labels=range(self.num_classes), average=None)
        # average only over classes present in this worker's gold subset
        present_classes = np.unique(self.gold[labelled]).astype(int)
        f1scores[labelled, worker] = np.mean(per_class_f1[present_classes])
    worst = np.argmin(f1scores, axis=1)
    agg = self.annos[np.arange(ntoks), worst]
    # one-hot probabilities for the chosen labels
    probs = np.zeros((self.gold.shape[0], self.num_classes))
    for tok in range(self.gold.shape[0]):
        probs[tok, int(agg[tok])] = 1
    return agg, probs
def _run_clustering(self):
    """Baseline: aggregate the crowd labels with the clustering method.

    :return: tuple of (aggregated labels, one-hot probabilities).
    """
    agg = clustering.Clustering(self.gold, self.annos, self.doc_start).run()
    # one-hot probabilities for the aggregated labels
    ntoks = self.gold.shape[0]
    probs = np.zeros((ntoks, self.num_classes))
    for tok in range(ntoks):
        probs[tok, int(agg[tok])] = 1
    return agg, probs
def _run_ds(self):
    """Run the Dawid & Skene aggregation model and return the most likely
    label per token plus the posterior class distribution."""
    posterior = ds(self.annos, self.num_classes, self.beta0_factor, self.max_iter)
    labels = np.argmax(posterior, axis=1)
    return labels, posterior
def _run_ibcc2(self):
    """Run the newer, simpler IBCC implementation (ibccvb) with the
    configured prior hyperparameters."""
    posterior, _ = ibccvb(self.annos, self.num_classes, self.beta0_factor,
                          self.alpha0_factor, self.alpha0_diags, self.begin_factor, self.max_iter)
    labels = np.argmax(posterior, axis=1)
    return labels, posterior
def _run_ibcc(self, use_ml=False):
    """Run independent Bayesian classifier combination over the annotations.

    :param use_ml: if True, use maximum likelihood with near-flat priors;
        otherwise variational Bayes with the configured prior hyperparameters.
    :return: tuple of (aggregated labels, posterior class probabilities).
    """
    if use_ml:
        alpha0 = np.ones((self.num_classes, self.num_classes)) + 0.1 # no prior information at all, just add a small regularization term
        ibc = ibcc.IBCC(nclasses=self.num_classes, nscores=self.num_classes, nu0=np.ones(self.num_classes),
                        alpha0=alpha0, uselowerbound=True, use_ml=True)
    else:
        self.ibcc_beta0 = np.ones(self.num_classes) * self.beta0_factor
        # spread alpha0_factor across off-diagonals and boost the diagonal, so
        # alpha0_diags controls the prior belief in annotator correctness
        self.ibcc_alpha0 = (self.alpha0_factor/float(self.num_classes-1)) * np.ones((self.num_classes, self.num_classes)) \
                           + (self.alpha0_diags + self.alpha0_factor *(1-1/float(self.num_classes-1))) * np.eye(self.num_classes)
        ibc = ibcc.IBCC(nclasses=self.num_classes, nscores=self.num_classes, nu0=self.ibcc_beta0,
                        alpha0=self.ibcc_alpha0, uselowerbound=True)
    ibc.verbose = True
    ibc.max_iterations = self.max_iter
    # ibc.optimise_alpha0_diagonals = True
    if self.opt_hyper:
        probs = ibc.combine_classifications(self.annos, table_format=True, optimise_hyperparams=True,
                                            maxiter=10000)
    else:
        probs = ibc.combine_classifications(self.annos, table_format=True) # posterior class probabilities
    agg = probs.argmax(axis=1) # aggregated class labels
    return agg, probs
def _run_mace(self, timestamp):
    """Run the MACE aggregator via its external JAR.

    Writes the annotations to a temporary CSV (missing labels as empty
    cells), invokes MACE, then reads back the per-token posterior label
    distributions.

    :param timestamp: string used to make the temporary file name unique.
    :return: tuple of (aggregated labels, posterior class probabilities).
    """
    anno_path = os.path.join(self.outputdir, 'annos_tmp_%s' % timestamp)
    annotations = pd.DataFrame(self.annos)
    # Fix: DataFrame.replace returns a new frame -- the result was previously
    # discarded, so missing labels were written as -1 instead of empty cells.
    annotations = annotations.replace(-1, np.nan)
    annotations.to_csv(anno_path, sep=',', header=False, index=False)
    subprocess.call(['java', '-jar', './src/baselines/MACE/MACE.jar', '--distribution', '--prefix',
                     os.path.join(self.outputdir, 'mace'),
                     anno_path]) # , stdout = devnull, stderr = devnull)
    result = np.genfromtxt(os.path.join(self.outputdir, 'mace.prediction'))
    # MACE output rows alternate (label, probability) column pairs
    probs = np.zeros((self.gold.shape[0], self.num_classes))
    for i in range(result.shape[0]):
        for j in range(0, self.num_classes * 2, 2):
            probs[i, int(result[i, j])] = result[i, j + 1]
    os.remove(anno_path) # clean up tmp file
    agg = np.argmax(probs, 1)
    return agg, probs
def _run_bsc(self, method, doc_start_unseen=None, text_unseen=None):
    """Run the Bayesian sequence combination (BSC) model.

    The method name encodes the configuration, e.g.
    'bsc_<worker_model>[_integrateLSTM[_atEnd]][_integrateIF][_noHMM]'.

    :param method: method-name string to parse for BSC options.
    :param doc_start_unseen: doc-start markers for unlabelled AL documents.
    :param text_unseen: tokens for unlabelled AL documents.
    :return: (agg, probs, pseq, agg_nocrowd, probs_nocrowd, agg_unseen,
        probs_unseen).
    """
    method_bits = method.split('_')
    if method_bits[-1] == 'noHMM':
        transition_model = 'None'
    else:
        transition_model = 'HMM'
    # needs to run integrate method for task 2 as well
    if len(method_bits) > 2 and 'integrateLSTM' in method_bits:
        # 2 = converge the worker models before training the LSTM, 1 = jointly
        if len(method_bits) > 3 and 'atEnd' in method_bits:
            use_LSTM = 2
        else:
            use_LSTM = 1
    else:
        use_LSTM = 0
    if len(method_bits) > 2 and 'integrateIF' in method_bits:
        use_IF = True
    else:
        use_IF = False
    worker_model = method_bits[1]
    L = self.num_classes
    # IOB2 label layout: classes alternate I/B per entity type around the O label
    num_types = (self.num_classes - 1) / 2
    outside_label = 1
    inside_labels = (np.arange(num_types) * 2 + 1).astype(int)
    inside_labels[0] = 0
    begin_labels = (np.arange(num_types) * 2 + 2).astype(int)
    data_model = []
    dev_sentences = []
    if use_LSTM > 0:
        data_model.append('LSTM')
        if self.gold_val is not None and self.doc_start_val is not None and self.text_val is not None:
            dev_sentences, _, _ = lstm_wrapper.data_to_lstm_format(self.text_val,
                                                                   self.doc_start_val, self.gold_val)
    # the integrated independent-features (IF) model needs the word features
    if use_IF:
        no_words = False
    else:
        no_words = True
    bsc_model = bsc.BSC(L=L, K=self.annos.shape[1], max_iter=self.max_iter, # eps=-1,
                        inside_labels=inside_labels, outside_label=outside_label, beginning_labels=begin_labels,
                        alpha0_diags=self.alpha0_diags, alpha0_factor=self.alpha0_factor,
                        alpha0_B_factor=self.begin_factor,
                        beta0_factor=self.beta0_factor, worker_model=worker_model, tagging_scheme='IOB2',
                        data_model=data_model, transition_model=transition_model, no_words=no_words,
                        use_lowerbound=False, model_dir=self.outputdir)
    bsc_model.verbose = True
    bsc_model.max_internal_iters = self.max_internal_iter
    if self.opt_hyper:
        # NOTE(review): this branch never assigns pseq, so the return below
        # raises UnboundLocalError when opt_hyper is True -- confirm intended use.
        np.random.seed(592) # for reproducibility
        probs, agg = bsc_model.optimize(self.annos, self.doc_start, self.text, maxfun=1000,
                                        converge_workers_first=use_LSTM==2, dev_sentences=dev_sentences)
    else:
        probs, agg, pseq = bsc_model.run(self.annos, self.doc_start, self.text,
                                         converge_workers_first=use_LSTM==2, crf_probs=self.crf_probs, dev_sentences=dev_sentences)
    if self.gold_nocrowd is not None and '_thenLSTM' not in method:
        # predict on the second test set that has no crowd labels
        probs_nocrowd, agg_nocrowd = bsc_model.predict(self.doc_start_nocrowd, self.text_nocrowd)
    else:
        probs_nocrowd = None
        agg_nocrowd = None
    if '_thenLSTM' not in method and doc_start_unseen is not None and len(doc_start_unseen) > 0:
        # predict on documents not yet selected by active learning
        probs_unseen, agg_unseen = bsc_model.predict(doc_start_unseen, text_unseen)
    else:
        probs_unseen = None
        agg_unseen = None
    return agg, probs, pseq, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen
def _run_hmmcrowd(self, doc_subset, nselected_by_doc,
                  overwrite_data_file=False):
    """Run the HMM-crowd baseline (Nguyen et al.) on a subset of documents.

    :param doc_subset: documents to include (None-aware via subset helper).
    :param nselected_by_doc: per-document counts of selected annotations.
    :param overwrite_data_file: regenerate the cached HMMCrowd-format file.
    :return: (aggregated labels, posterior probabilities, fitted HMM_crowd model).
    """
    sentences, crowd_labels, nfeats = data_to_hmm_crowd_format(self.annos_all, self.text_all, self.doc_start_all,
                                                               self.outputdir, overwrite=overwrite_data_file)
    # select only the requested documents/annotations from the cached data
    sentences, crowd_labels = subset_hmm_crowd_data(sentences, crowd_labels, doc_subset, nselected_by_doc)
    data = crowd_data(sentences, crowd_labels)
    hc = HMM_crowd(self.num_classes, nfeats, data, None, None, n_workers=self.annos_all.shape[1],
                   vb=[self.beta0_factor, self.alpha0_factor], smooth=self.alpha0_factor)
    hc.init(init_type='dw', wm_rep='cv', dw_em=5, wm_smooth=self.alpha0_factor)
    print('Running HMM-crowd inference...')
    hc.em(self.max_iter) # performance goes down with more iterations...?!
    print('Computing most likely sequence...')
    hc.mls()
    print('HMM-crowd complete.')
    # agg = np.array(hc.res).flatten()
    # concatenate per-sentence label sequences into one token-level vector
    agg = np.concatenate(hc.res)[:, None]
    # flatten the per-sentence posteriors into one (ntokens, nclasses) array
    probs = []
    for sentence_post_arr in hc.sen_posterior:
        for tok_post_arr in sentence_post_arr:
            probs.append(tok_post_arr)
    probs = np.array(probs)
    return agg.flatten(), probs, hc
def _run_LSTM(self, train_labs, timestamp, doc_start_unseen=None, text_unseen=None):
    """Train an LSTM tagger on aggregated labels and predict on all splits.

    :param train_labs: per-token training labels (an aggregation result).
    :param timestamp: string used to name this run's model directory.
    :param doc_start_unseen: doc-start markers for unlabelled AL documents.
    :param text_unseen: tokens for unlabelled AL documents.
    :return: (agg, probs) for the crowd set, the no-crowd set and the unseen
        set; the latter two pairs are None when not applicable.
    """
    # train only on tokens that at least one worker labelled
    valididxs = np.any(self.annos != -1, axis=1)
    labelled_sentences, IOB_map, IOB_label = lstm_wrapper.data_to_lstm_format(
        self.text[valididxs], self.doc_start[valididxs], train_labs.flatten()[valididxs], self.num_classes)
    np.random.seed(592) # for reproducibility
    if self.gold_val is None or self.doc_start_val is None or self.text_val is None:
        # If validation set is unavailable, select a random subset of combined data to use for validation
        # Simulates a scenario where all we have available are crowd labels.
        train_sentences, dev_sentences, self.gold_val = lstm_wrapper.split_train_to_dev(labelled_sentences)
        all_sentences = labelled_sentences
    else:
        train_sentences = labelled_sentences
        dev_sentences, _, _ = lstm_wrapper.data_to_lstm_format(self.text_val,
                                                               self.doc_start_val, self.gold_val)
        all_sentences = np.concatenate((train_sentences, dev_sentences), axis=0)
    lstm = lstm_wrapper.LSTMWrapper(os.path.join(self.outputdir, 'models_LSTM_%s' % timestamp))
    print('Running LSTM with crf probs = %s' % self.crf_probs)
    lstm.train_LSTM(all_sentences, train_sentences, dev_sentences, self.gold_val, IOB_map,
                    IOB_label, self.num_classes, freq_eval=5, n_epochs=self.max_internal_iter,
                    crf_probs=self.crf_probs, max_niter_no_imprv=2)
    # now make predictions for all sentences
    test_sentences, _, _ = lstm_wrapper.data_to_lstm_format(
        self.text, self.doc_start, np.ones(len(train_labs)), self.num_classes)
    agg, probs = lstm.predict_LSTM(test_sentences)
    if self.N_nocrowd > 0:
        # predict on the second test set that has no crowd labels
        test_sentences, _, _ = lstm_wrapper.data_to_lstm_format(
            self.text_nocrowd, self.doc_start_nocrowd, np.ones(self.N_nocrowd), self.num_classes)
        agg_nocrowd, probs_nocrowd = lstm.predict_LSTM(test_sentences)
    else:
        agg_nocrowd = None
        probs_nocrowd = None
    if doc_start_unseen is not None:
        # predict on documents not yet selected by active learning
        N_unseen = len(doc_start_unseen)
        test_sentences, _, _ = lstm_wrapper.data_to_lstm_format(
            text_unseen, doc_start_unseen, np.ones(N_unseen), self.num_classes)
        agg_unseen, probs_unseen = lstm.predict_LSTM(test_sentences)
    else:
        agg_unseen = None
        probs_unseen = None
    return agg, probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen
def _uncertainty_sampling(self, most_likely_probs, selected_toks, selected_docs):
unfinished_toks = np.ones(self.annos_test.shape[0], dtype=bool)
if self.annos is not None:
unfinished_toks[selected_toks] = (np.sum(self.annos_test[selected_toks] != -1, axis=1) - | np.sum(self.annos != -1, axis=1) | numpy.sum |
import cv2
import numpy as np
from numpy.core.defchararray import array
#import matplotlib.pyplot as plt
# import speech_recognition as sr
# import time
# from gtts import gTTS
# import os
from scipy.spatial import distance as dist
from collections import OrderedDict
from contextlib import nullcontext
import sys
import cv2 as cv2
import math
import numpy as np
from numpy.core.fromnumeric import shape
import scipy
from skimage import data
from skimage.color import rgb2gray
import matplotlib.pyplot as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image, ImageEnhance
import seaborn as sns
from scipy.signal import argrelextrema
from scipy.signal import argrelmin
from scipy.signal import argrelmax
from scipy.signal import find_peaks
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift, estimate_bandwidth
import skimage.io
from numpy import ones, vstack
from numpy.linalg import lstsq
from sklearn.cluster import AgglomerativeClustering
from PIL import Image, ImageEnhance
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt
################################################### old start
def Slope(y2, y1, x2, x1):
m = (y2 - y1)/(x2-x1)
return m
def canny(image):
    """Return the Canny edge map of *image* after a 5x5 Gaussian blur
    (hysteresis thresholds 0 and 70)."""
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    return cv2.Canny(blurred, 0, 70)
def GroupLines(image, lines8, labels):
    """Draw one line per cluster label onto a blank canvas shaped like *image*.

    :param image: reference image (only its shape/dtype is used).
    :param lines8: array of Hough lines, each reshapeable to (x1, y1, x2, y2).
    :param labels: cluster label per line (e.g. from KMeans).
    :return: black image with the selected lines drawn in BGR (255, 0, 0).

    NOTE(review): 'l' is the *label value* but is also used to index lines8,
    so at most lines8[0..3] are ever drawn regardless of cluster membership --
    probably 'for i, l in enumerate(labels)' with lines8[i] was intended.
    Also, all four branches draw the same colour (255, 0, 0) despite the
    red/blue/green/pink comments.
    """
    line_image = np.zeros_like(image)
    for l in labels:
        if(l == 0):
            x1, y1, x2, y2 = lines8[l].reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 2) # red
        elif(l == 1):
            x1, y1, x2, y2 = lines8[l].reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 2) # blue
        elif(l == 2):
            x1, y1, x2, y2 = lines8[l].reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 2) # green
        elif(l == 3):
            x1, y1, x2, y2 = lines8[l].reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 2) # pink
    return line_image
def draw_lines_on_image(image, lines):
    """Return a blank copy of *image* with every Hough line drawn in BGR
    (255, 0, 0), 2 px thick. *lines* may be None (nothing is drawn)."""
    canvas = np.zeros_like(image)
    if lines is None:
        return canvas
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        cv2.line(canvas, (x1, y1), (x2, y2), (255, 0, 0), 2)
    return canvas
def threshold_color(image):
    """Binary mask of near-white pixels: every channel within [145, 255]."""
    return cv2.inRange(image, (145, 145, 145), (255, 255, 255))
def getBackgroundImage(videoName):
    """Estimate the static background of a video as the per-pixel median over
    60 sampled greyscale frames.

    :param videoName: path of the video file to open.
    :return: float32 greyscale background image.
    """
    array = []
    cap = cv2.VideoCapture(videoName)
    frameCount = 60  # number of frames used for the median
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # NOTE(review): buf is allocated but never used
    buf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype('uint8'))
    fc = 0  # used for iteration
    ret = True
    while (fc < frameCount and ret):
        ret, img = cap.read()
        # skip three frames between samples to spread the sample across time
        cap.read()
        cap.read()
        cap.read()
        # NOTE(review): if read() fails (short video), img is None and rgb2gray
        # raises before the loop's 'ret' check -- confirm inputs are long enough
        gray = rgb2gray(img)
        array.append(gray)
        fc += 1
    cap.release()
    # float16 keeps the 60-frame stack small; the median is taken per pixel
    array = np.asarray(array, dtype=np.float16)
    med = np.median(array, axis=0)
    med = np.asarray(med, dtype=np.float32)
    return med
# Quadrilateral (x, y) vertices of the region of interest used for masking;
# the commented-out pairs are earlier values kept for reference.
region_of_interest_vertices = [
    #(992, 232),
    (829, 106),
    #(1322, 232),
    (1163, 106),
    #(1952, 1296),
    (1163, 1080),
    #(285, 1296)
    (268, 1080)
]
# Wider quadrilateral used when masking the frame for lane detection.
region_of_interest_vertices_for_lanes = [
    (842, 106),
    (1140, 106),
    (1669, 1080),
    (323, 1080)
]
def get_hi_n_lo_y(box):
hi = 0
lo = 0
for point in box:
_, y = point
if(y > hi):
lo = hi
hi = y
return hi, lo
def region_of_interest(img, vertices):
    """Mask *img* so only the polygon defined by *vertices* is kept."""
    mask = np.zeros_like(img)
    # fill the polygon with the single-channel max value
    cv2.fillPoly(mask, vertices, 255)
    return cv2.bitwise_and(img, mask)
def hole_filling(img):
    """Return the mask of interior holes of a binary image: flood-fill the
    background from (0, 0), then invert, leaving only pixels unreachable
    from the border."""
    flooded = img.copy()
    h, w = flooded.shape
    # floodFill requires a mask 2 pixels larger than the image
    fill_mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(flooded, fill_mask, (0, 0), 255)
    return cv2.bitwise_not(flooded)
def get_m_c(line):
    """Fit y = m*x + c through the two endpoints of a Hough line.

    :param line: array reshapeable to (x1, y1, x2, y2).
    :return: (m, c) slope and intercept from a least-squares fit.
    """
    x1, y1, x2, y2 = line.reshape(4)
    # removed unused locals (P, Q, points) from the original
    x_coords = (x1, x2)
    y_coords = (y1, y2)
    # solve [x 1] @ [m, c]^T = y in the least-squares sense
    A = vstack([x_coords, ones(len(x_coords))]).T
    m, c = lstsq(A, y_coords, rcond=-1)[0]
    return m, c
def get_average_line(lines, img):
    """Return a vertical line at the modal x position of *lines*, spanning
    the full image height.

    :param lines: array of line coordinates.
    :param img: greyscale image (only its shape is used).
    :return: (x, 0, x, h) -- NOTE(review): x is the array returned by
        scipy.stats.mode, not a scalar; confirm callers expect this.
    """
    # NOTE(review): relies on the bare 'import scipy' at module level exposing
    # scipy.stats; scipy.stats is never imported explicitly -- verify this
    # works with the installed scipy version.
    val = scipy.stats.mode(lines)
    h, w = img.shape
    x, _ = val
    y1 = 0
    y2 = h
    print(x[0])
    return x, y1, x, y2
def get_lines(img):
    """Detect line segments in a binary edge image with the probabilistic
    Hough transform.

    :param img: single-channel binary image.
    :return: lines of shape (N, 1, 4) as (x1, y1, x2, y2), or None if no
        lines were found.
    """
    # removed two unused accumulator variables from the original
    return cv2.HoughLinesP(img, 6, np.pi / 360, 100,
                           np.array([]), minLineLength=50, maxLineGap=1000)
def get_inv(img):
    """Intended bird's-eye ("inverse perspective") transform of the lane
    region.

    NOTE(review): the warped image is computed but discarded -- the function
    currently returns *img* unchanged. Either return inverse_prespective or
    remove the dead computation; confirm which behaviour callers rely on.
    """
    pts1 = np.float32([[842, 106], [1140, 106], [323, 1080],[1669, 1080]])  # source quad in the frame
    pts2 = np.float32([[0, 0], [298, 0], [0, 947], [298, 947]])  # destination rectangle
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    matrix2 = cv2.getPerspectiveTransform(pts2, pts1)  # inverse mapping (unused)
    inverse_prespective = cv2.warpPerspective(
        img, matrix, (298, 947))
    return img
def filterLines(lines):
    """Keep only near-vertical Hough lines (angle from horizontal > 85 deg).

    :param lines: array of lines of shape (N, 1, 4), or None.
    :return: kept lines reshaped to (M, 1, 4); an empty list when *lines* is
        None or nothing qualifies (preserves the original's mixed return type).
    """
    # replaced the flag/np.append accumulation (quadratic re-allocation) with
    # a plain list; output shape and contents are unchanged
    kept = []
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            # unsigned angle of the segment measured from the horizontal axis
            angle_deg = math.atan2(abs(y2 - y1), abs(x2 - x1)) * 180 / math.pi
            if angle_deg > 85:
                kept.append(line)
    if not kept:
        return []
    return np.array(kept).reshape(-1, 1, 4)
def findCoutours(img, height=0, area=0):
    """Find contours taller than *height* with area greater than *area*.

    :param img: binary input image.
    :param height: minimum upright bounding-box height for a contour to be kept.
    :param area: minimum contour area for a contour to be kept.
    :return: (contoursImg, filtered_cnts, bounding_pic) -- drawn contours, an
        (N, 4, 2) int array of rotated min-area-rect corners, and the contours
        overlaid with their rotated boxes.

    NOTE(review): if no contour qualifies, filtered_cnts is still a plain
    Python list and .reshape raises AttributeError.
    """
    contours, _ = cv2.findContours(
        img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contoursImg = np.zeros_like(img)
    filtered_cnts = []
    bounding_pic = contoursImg.copy()
    flag = 0  # NOTE(review): flag and count are never used
    count = 0
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        a = cv2.contourArea(cnt)
        if h > height and a > area:
            # rotated minimum-area rectangle around the contour
            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            filtered_cnts = np.append(filtered_cnts, box)
            cv2.drawContours(bounding_pic, [box], 0, (255, 255, 255), 1)
            cv2.drawContours(contoursImg, [cnt], -1, (255, 255, 255), 1)
            #cv2.rectangle(bounding_pic, (x, y), (x+w, y+h), (255, 255, 255), 1)
    # overlay the contours on the box image
    bounding_pic = bounding_pic | contoursImg
    filtered_cnts = filtered_cnts.reshape(-1, 4, 2)
    filtered_cnts = filtered_cnts.astype(int)
    return contoursImg, filtered_cnts, bounding_pic
def findCoutours_again(img, height=0, area=0):
    """Variant of findCoutours that returns axis-aligned bounding-rect corners.

    TODO(review): near-duplicate of findCoutours (differs only in emitting
    upright-rect corners instead of minAreaRect boxes) — consider merging.

    :return: (contoursImg, filtered_cnts, bounding_pic); filtered_cnts holds
        the upright rect corners (x,y),(x+w,y),(x+w,y+h),(x,y+h) per contour,
        shaped (k, 4, 2).
    """
    contours, _ = cv2.findContours(
        img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contoursImg = np.zeros_like(img)
    bounding_pic = contoursImg.copy()
    rects = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        a = cv2.contourArea(cnt)
        if h > height and a > area:
            rect = cv2.minAreaRect(cnt)
            box = np.int0(cv2.boxPoints(rect))
            # Corners of the upright bounding rect, replacing the previous
            # eight individual np.append calls.
            rects.append([[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
            cv2.drawContours(bounding_pic, [box], 0, (255, 255, 255), 1)
            cv2.drawContours(contoursImg, [cnt], -1, (255, 255, 255), 1)
            #cv2.rectangle(bounding_pic, (x, y), (x+w, y+h), (255, 255, 255), 1)
    bounding_pic = bounding_pic | contoursImg
    # BUG FIX: np.asarray handles the empty case ([]) without AttributeError.
    filtered_cnts = np.asarray(rects).reshape(-1, 4, 2).astype(int)
    return contoursImg, filtered_cnts, bounding_pic
def display_lines(image, lines):
line_image = | np.zeros_like(image) | numpy.zeros_like |
""" Image Deconvolution functions
"""
import numpy
import logging
import time
log = logging.getLogger(__name__)
def hogbom(dirty, psf, window, gain, thresh, niter, fracthresh, prefix=''):
""" Clean the point spread function from a dirty image
See Hogbom CLEAN (1974A&AS...15..417H)
This version operates on numpy arrays.
:param fracthresh:
:param prefix:
:param dirty: The dirty Image, i.e., the Image to be deconvolved
:param psf: The point spread-function
:param window: Regions where clean components are allowed. If True, entire dirty Image is allowed
:param gain: The "loop gain", i.e., the fraction of the brightest pixel that is removed in each iteration
:param thresh: Cleaning stops when the maximum of the absolute deviation of the residual is less than this value
:param niter: Maximum number of components to make if the threshold `thresh` is not hit
:return: clean component Image, residual Image
"""
starttime = time.time()
assert 0.0 < gain < 2.0
assert niter > 0
log.info("hogbom %s Max abs in dirty image = %.6f Jy/beam" % (prefix, numpy.max(numpy.abs(dirty))))
absolutethresh = max(thresh, fracthresh * numpy.fabs(dirty).max())
log.info("hogbom %s Start of minor cycle" % prefix)
log.info("hogbom %s This minor cycle will stop at %d iterations or peak < %.6f (Jy/beam)" %
(prefix, niter, absolutethresh))
comps = | numpy.zeros(dirty.shape) | numpy.zeros |
from __future__ import division, absolute_import, print_function
import unittest
import numpy.testing as testing
import numpy as np
import healpy as hp
from numpy import random
import healsparse
class BuildMapsTestCase(unittest.TestCase):
    """Unit tests for constructing and updating HealSparse maps."""
    def test_build_maps_single(self):
        """
        Test building a map for a single-value field
        """
        # Fixed seed so the random sky positions are reproducible.
        random.seed(seed=12345)
        nside_coverage = 32
        nside_map = 64
        n_rand = 1000
        # Random positions over the whole sky, in degrees (lonlat convention).
        ra = np.random.random(n_rand) * 360.0
        dec = np.random.random(n_rand) * 180.0 - 90.0
        # Create an empty map
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, np.float64)
        # Look up all the values, make sure they're all UNSEEN
        testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hp.UNSEEN)
        # Fail to append because of wrong dtype
        pixel = np.arange(4000, 20000)
        values = np.ones_like(pixel, dtype=np.float32)
        self.assertRaises(ValueError, sparse_map.update_values_pix, pixel, values)
        # Append a bunch of pixels
        values = np.ones_like(pixel, dtype=np.float64)
        sparse_map.update_values_pix(pixel, values)
        # Make a healpix map for comparison
        hpmap = np.zeros(hp.nside2npix(nside_map)) + hp.UNSEEN
        hpmap[pixel] = values
        # Convert lon/lat degrees to healpy's colatitude/longitude radians.
        theta = np.radians(90.0 - dec)
        phi = np.radians(ra)
        ipnest_test = hp.ang2pix(nside_map, theta, phi, nest=True)
        testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpmap[ipnest_test])
        # Replace the pixels
        values += 1
        sparse_map.update_values_pix(pixel, values)
        hpmap[pixel] = values
        testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpmap[ipnest_test])
        # Replace and append more pixels
        # Note that these are lower-number pixels, so the map is out of order
        pixel2 = np.arange(3000) + 2000
        values2 = np.ones_like(pixel2, dtype=np.float64)
        sparse_map.update_values_pix(pixel2, values2)
        hpmap[pixel2] = values2
        testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpmap[ipnest_test])
        # Test making empty maps
        sparse_map2 = healsparse.HealSparseMap.make_empty_like(sparse_map)
        self.assertEqual(sparse_map2.nside_coverage, sparse_map.nside_coverage)
        self.assertEqual(sparse_map2.nside_sparse, sparse_map.nside_sparse)
        self.assertEqual(sparse_map2.dtype, sparse_map.dtype)
        self.assertEqual(sparse_map2._sentinel, sparse_map._sentinel)
        # Empty map pre-seeded with specific coverage pixels.
        sparse_map2b = healsparse.HealSparseMap.make_empty_like(sparse_map, cov_pixels=[0, 2])
        self.assertEqual(sparse_map2b.nside_coverage, sparse_map.nside_coverage)
        self.assertEqual(sparse_map2b.nside_sparse, sparse_map.nside_sparse)
        self.assertEqual(sparse_map2b.dtype, sparse_map.dtype)
        self.assertEqual(sparse_map2b._sentinel, sparse_map._sentinel)
        # Presumably cov_pixels [0, 2] plus one extra block gives 3 fine-pixel
        # blocks — verify against healsparse coverage-map docs.
        self.assertEqual(len(sparse_map2b._sparse_map),
                         sparse_map2._cov_map.nfine_per_cov*3)
        testing.assert_array_equal(sparse_map2b._sparse_map, sparse_map._sentinel)
from bs4 import BeautifulSoup
import re
from os import listdir
from os.path import isfile, join
import numpy as np
from scipy.optimize import curve_fit
# Initialise some arrays for analyses later
exam_difficulties = []
master_questions_arr = []
# Allow user to choose which folder to ultimately extract converted pdf->html files from.
yn = input("methods (y) or spec (n): ")
if yn.lower() == "y":
    folder = 'Methods-Exams'
else:
    folder = 'Spec-Exams'
allPDFs = [f for f in listdir(folder) if isfile(join(folder, f))] #Get list of files in spec-exams folder
# Process every converted exam (HTML) file: extract question headings and
# their mark-distribution tables, repair mismatches, and score difficulty.
for file in range(0,len(allPDFs)):
    #Setup Variables
    code = data = open(folder+"/"+allPDFs[file], encoding="utf8")
    html = code.read()
    allQuestions = []
    allTables = []
    allH3 = []
    #
    # EXTRACT DATA AND FILTER DATA
    #
    soup = BeautifulSoup(html, "html.parser")
    tabletag = soup.body.findAll('table')
    exam_id = soup.findAll('title')[0].text #Info about this exam
    #print(exam_id)
    #required funciton
    def hasNumbers(inputString):
        # True if the string contains at least one digit (e.g. "Question 3").
        return any(char.isdigit() for char in inputString)
    #filter tables
    # Keep only tables that report mark distributions.
    for table in tabletag:
        if table.text.find("Marks") != -1:
            allTables.append(table)
    # Identify questions
    # Question headings may sit at different levels; try h2..h5 until one matches.
    for i in range(2,6):
        h3tag = soup.body.findAll('h'+str(i))
        for h3 in h3tag:
            if h3.text.find("Question") != -1 and hasNumbers(h3.text):
                allH3.append(h3)
        if len(allH3) > 0:
            break
    #
    # ACCOUNT FOR POSSIBLE HOLES IN THE DATA
    #
    if len(allH3) != len(allTables): #ONLY IF THERE IS NO 'One-to-one' RELATIONSHIP (else the data has holes)
        indexes_of_elements = [] #array to store 'positions' of each element in html
        # Fill array of positions for titles
        for i in range(0,len(allH3)):
            if html.count(allH3[i].text) > 1:
                # Ambiguous text: disambiguate by the closing tag that follows it.
                if html.strip().find(allH3[i].text+"</h3") != -1:
                    indexes_of_elements.append([html.strip().find(allH3[i].text+"</h3"),"h3"])
                elif html.strip().find(allH3[i].text+"</a") != -1:
                    indexes_of_elements.append([html.strip().find(allH3[i].text+"</a"),"h3"])
                elif html.strip().find(allH3[i].text+"</h4") != -1:
                    indexes_of_elements.append([html.strip().find(allH3[i].text+"</h4"),"h3"])
                elif html.strip().find(allH3[i].text+"</h2") != -1:
                    indexes_of_elements.append([html.strip().find(allH3[i].text+"</h2"),"h3"])
            elif html.count(allH3[i].text) == 1:
                indexes_of_elements.append([html.strip().find(allH3[i].text),"h3"])
        previous_search_s = indexes_of_elements[0][0]
        index1 = 0
        # Fill array of positions for tables
        while index1 != -1:
            index1 = html.strip().find("<table",previous_search_s) #the left point
            if index1 != -1:
                indexes_of_elements.append([index1, "table"])
                previous_search_s = index1+1
        #Sort by order of appearance
        indexes_of_elements = sorted(indexes_of_elements,key=lambda x: x[0])
        running_index = 0
        output = []
        #Iterate with a running index to find inconsistencies in the data
        # A well-formed document alternates H (heading) and T (table); the
        # running index detects a missing heading (E) or a missing table (M).
        for i in range(0,len(indexes_of_elements)):
            #print(indexes_of_elements[i][1] + " ----- " + str(indexes_of_elements[i][0]) + " ------- " + html[indexes_of_elements[i][0]:indexes_of_elements[i][0]+20])
            if indexes_of_elements[i][1] == "table":
                running_index = running_index - 1
                output.append("T")
            elif indexes_of_elements[i][1] != "table":
                running_index = running_index + 1
                output.append("H")
            if running_index == -1:
                #Mismatch has occured, input a dummy title
                output[len(output)-1] = "E"
                output.append("T")
                running_index = 0
            elif running_index == 2:
                #Mismatch has occured, input a dummy title
                output[len(output)-1] = "M"
                output.append("H")
                running_index = 1
        #Create one-to-one relationship array
        j1=0
        j2=0
        #print(output)
        for i in range(1, len(output)+1):
            if i % 2 == 0: #Every H-T pair
                if output[i-2] != "E" and output[i-1] != "M":
                    #print(j1,len(allH3),j2,len(allTables))
                    allQuestions.append([allH3[j1].text,allTables[j2]])
                    j1+=1
                    j2+=1
                elif output[i-2] == "E":
                    try:
                        allQuestions.append(["Missing (between " + allH3[j1-1].text + " and " + allH3[j1].text + ")",allTables[j2]])
                    except:
                        allQuestions.append(["Missing (Unknown location)",allTables[j2]])
                    j2+=1
                elif output[i-1] == "M":
                    allQuestions.append([allH3[j1].text,"Missing"])
                    j1+=1
    else:
        # Clean one-to-one case: pair headings and tables positionally.
        for i in range(0, len(allH3)):
            allQuestions.append([allH3[i].text,allTables[i]])
    #print(str(len(allQuestions)) + " Questions. From Hardest-Easiest:") #print the length (i.e-#of questions)
    #
    #DATA MANIPULATION
    #
    #Calculate difficulty ratings
    for i in range(0, len(allQuestions)):
        if allQuestions[i][1] != "Missing":
            marks = int(allQuestions[i][1].text.split('A')[0].strip()[-1])
            try:
                # Recomputed inside the try so a parse failure is caught below.
                marks = int(allQuestions[i][1].text.split('A')[0].strip()[-1])
                data = []
                table = allQuestions[i][1]
                rows = table.find_all('tr')
                for row in rows:
                    cols = row.find_all('td')
                    cols = [ele.text.strip() for ele in cols]
                    data.append([ele for ele in cols if ele]) # Get rid of empty values
                percentages = data[1]
                average = 0
                mark = 0
                # Expected mark over the reported percentage distribution,
                # normalized by the maximum mark -> difficulty in [0, 1].
                for j in range(1,marks+2):
                    average += (int(percentages[j])/100)*mark
                    mark += 1
                diff = average/marks
                allQuestions[i].append(diff)
            except:
                # Fallback 1: the table lists the average mark as a decimal like "1.3".
                try:
                    avg = float(re.findall("\d\.\d", allQuestions[i][1].text)[0])
                    diff = avg/marks
                    allQuestions[i].append(diff)
                except:
                    # Fallback 2: take the trailing character as the average mark.
                    try:
                        avg = float(allQuestions[i][1].text[len(allQuestions[i][1].text)-1:len(allQuestions[i][1].text)])
                        diff = avg/marks
                        if diff <= 1:
                            allQuestions[i].append(diff)
                        else:
                            # NOTE(review): "error" + 1 raises TypeError,
                            # falling through to the except below — confirm intent.
                            print("error" + 1)
                    except:
                        # NOTE(review): no difficulty appended on this path, so
                        # question[2] later holds exam_id — confirm intent.
                        avg = -1
        else:
            # Sentinel for questions whose mark table is missing.
            allQuestions[i].append(-2)
    #Sort allQuestions list by difficulty
    #allQuestions = sorted(allQuestions,key=lambda x: x[2])
    sum_diff = 0
    #Add exam year to allQuestions and display questions
    for i in range(0, len(allQuestions)):
        allQuestions[i].append(exam_id)
        #print(allQuestions[i][0], "-", allQuestions[i][2])
        sum_diff += allQuestions[i][2]
        master_questions_arr.append(allQuestions[i])
    avgDiff = sum_diff/len(allQuestions)
    exam_difficulties.append([avgDiff,exam_id])
    #print("Overall Difficulty: ", avgDiff)
master_questions_arr = sorted(master_questions_arr,key=lambda x: x[2]) #Sort all questions by difficulty
print("Loaded " + str(len(master_questions_arr)) + " total questions from " + str(len(exam_difficulties)) + " exams.")
user = input("Do you want questions with missing tables to be displayed? (y/n): ")
#Display ALL QUESTIONS:
for question in master_questions_arr:
    if question[2] == -2:
        #Lost data
        if user.lower() == "y":
            print(question[0], "-", "MISSING TABULAR DATA", " from: ", question[3])
    elif question[2] == -1 or question[2] > 1:
        #Edge Case
        print(question[0], " - EXTREME EDGE CASE, from: ", question[3])
    elif question[2] >= 0 and question[2] <= 1:
        print(question[0], "-", question[2], " from: ", question[3])
#Display difficulty distribution graph
# NOTE(review): mid-script imports; csv appears unused here.
import csv
import matplotlib.pyplot as plt
import numpy as np
average_list = []
# Keep only valid difficulty scores in (0, 1] for the histogram.
for question in master_questions_arr:
    if question[2] > 0 and question[2] <= 1:
        average_list.append(question[2])
plt.hist(average_list, bins = 10)
plt.show()
# NOTE(review): results of mean/median are discarded — print or store them?
np.mean(average_list)
np.median(average_list)
def prob(lst, mini, maxi):
    """Return the fraction of values in *lst* lying within [mini, maxi].

    :param lst: sequence of numeric values.
    :param mini: inclusive lower bound.
    :param maxi: inclusive upper bound.
    :return: fraction in [0.0, 1.0]; 0.0 for an empty sequence
        (BUG FIX: previously raised ZeroDivisionError).
    """
    if not lst:
        return 0.0
    ctr = sum(1 for x in lst if mini <= x <= maxi)
    return ctr / len(lst)
# Example query: fraction of questions with difficulty in [0, 0.597].
# NOTE(review): the return value is discarded — print or store it?
prob(average_list, 0, 0.597)
lst = []  # NOTE(review): appears unused before analyse() below — confirm intent
def analyse(lst):
plt.hist(lst, bins = 20, density=True)
if yn.lower() == "y":
plt.title("Methods*")
else:
plt.title("Specialist*")
plt.xlabel("Proportion of Marks")
plt.xticks(np.arange(0, 1.0, 0.1))
plt.show()
print("Mean: " + str( | np.mean(lst) | numpy.mean |
import csv
import os
import cv2
import numpy as np
from numpy.random import shuffle
import pickle
CSV_FILENAME= 'driving_log.csv'
def load_image(filename):
    """Load an image with OpenCV, remapping the absolute path recorded in the
    driving log to a path relative to the working directory.

    Keeps the last three path components (e.g. run/IMG/frame.jpg) and joins
    them with forward slashes for cv2.imread.
    """
    path_sep = '\\' if os.name in ['nt'] else '/'
    relative_parts = filename.split(path_sep)[-3:]
    return cv2.imread("/".join(relative_parts))
def batches(dirname, data, batch_size, steering_correction):
    """Infinite generator of (images, angles) training batches.

    For each CSV row the center/left/right camera frames are loaded; the
    left/right angles get +/- steering_correction, and every frame is also
    added horizontally flipped with a negated angle (6 samples per row).
    NOTE(review): dirname is unused — image paths come from the rows via
    load_image; confirm before removing the parameter.
    """
    num_samples = len(data)
    while True:
        # Reshuffle the rows once per pass (epoch); shuffles in place.
        shuffle(data)
        for offset in range(0, num_samples, batch_size):
            batch = data[offset: offset + batch_size]
            images = []
            angles = []
            for row in batch:
                flip_steering = lambda s: -1.0 * s
                # center, left (+correction), right (-correction)
                steering = [float(row[3]),
                            float(row[3]) + steering_correction,
                            float(row[3]) - steering_correction]
                for i in range(0, 3):
                    img = load_image(row[i])
                    images.append(img)
                    # Horizontal flip doubles the data and balances turns.
                    images.append(cv2.flip(img, 1))
                    angles.append(steering[i])
                    angles.append(flip_steering(steering[i]))
            yield np.array(images), np.array(angles)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 21:10:21 2020
@author: pengning
does the Green's function Arnoldi iteration over a shell domain for spherical waves
nice analytical properties of polynomial representation lost when using shell domain leaving out origin
try going back to spatial discretization idea instead
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
from .shell_domain import shell_rho_M, shell_rho_N
import mpmath
from mpmath import mp
def grid_integrate_trap(integrandgrid, diffgrid):
    """Trapezoid-rule integral of a sampled integrand over a (possibly
    non-uniform) grid; diffgrid holds the spacings between neighboring
    grid points (one fewer entry than integrandgrid)."""
    midpoint_vals = 0.5 * (integrandgrid[:-1] + integrandgrid[1:])
    return np.sum(midpoint_vals * diffgrid)
def rgrid_Mmn_normsqr(vecMgrid, rsqrgrid, rdiffgrid):
    """Squared norm of an M-type radial profile: Re \int |M(r)|^2 r^2 dr."""
    weighted = np.conj(vecMgrid) * vecMgrid * rsqrgrid
    return grid_integrate_trap(weighted, rdiffgrid).real
def rgrid_Mmn_dot(vecM1grid, vecM2grid, rsqrgrid, rdiffgrid):
    """Unconjugated radial inner product \int M1(r) M2(r) r^2 dr."""
    weighted = vecM1grid * vecM2grid * rsqrgrid
    return grid_integrate_trap(weighted, rdiffgrid)
def rgrid_Mmn_vdot(vecM1grid, vecM2grid, rsqrgrid, rdiffgrid):
    """Conjugated radial inner product \int conj(M1(r)) M2(r) r^2 dr."""
    weighted = np.conj(vecM1grid) * vecM2grid * rsqrgrid
    return grid_integrate_trap(weighted, rdiffgrid)
def rgrid_Mmn_plot(vecMgrid, rgrid):
    # Diagnostic plot: real (left) and imaginary (right) parts of an
    # M-type radial profile over the shell grid.
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
    ax1.plot(rgrid,np.real(vecMgrid))
    ax2.plot(rgrid,np.imag(vecMgrid))
    plt.show()
def shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, vecMgrid):
    """
    evaluates G(r,r')*vecM(r') over a shell region from R1 to R2
    the region coordinates are contained in rsqrgrid, a grid of r^2, and rdiffgrid, the distances between neighboring grid points; these instead of the original rgrid are given so that they only need to be computed once in main Arnoldi method
    """
    #rsqrgrid = rgrid**2
    #rdiffgrid = np.diff(rgrid)
    RgMvecMrsqr_grid = RgMgrid*vecMgrid*rsqrgrid
    # Imaginary part: k^3 * (full-shell overlap of RgM with vecM) * RgM(r).
    Im_newvecMgrid = k**3 * grid_integrate_trap(RgMvecMrsqr_grid, rdiffgrid) * RgMgrid
    # Real part, inner contribution: running trapezoid integral from R1 up to
    # each r (covers the r' < r portion of the kernel).
    Re_ImMfactgrid = np.zeros_like(rsqrgrid, dtype=np.complex)
    Re_ImMfactgrid[1:] = k**3 * np.cumsum((RgMvecMrsqr_grid[:-1]+RgMvecMrsqr_grid[1:])*rdiffgrid/2.0)
    rev_ImMvecMrsqr_grid = np.flip(ImMgrid*vecMgrid*rsqrgrid) #reverse the grid direction to evaluate integrands of the form kr' to kR2
    # Real part, outer contribution: running integral from each r up to R2
    # (covers the r' > r portion), built on the reversed grid then flipped back.
    Re_RgMfactgrid = np.zeros_like(rsqrgrid, dtype=np.complex)
    Re_RgMfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImMvecMrsqr_grid[:-1]+rev_ImMvecMrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
    Re_newvecMgrid = -ImMgrid*Re_ImMfactgrid - RgMgrid*Re_RgMfactgrid
    return Re_newvecMgrid + 1j*Im_newvecMgrid
def shell_Green_grid_Arnoldi_Mmn_oneshot(n,k,R1,R2, invchi, vecnum, gridpts=200):
    """Build a fixed-size (vecnum) orthonormal Arnoldi basis of M-type waves
    on the shell [R1, R2], then return the projected Green's matrix and
    Umat = invchi*I - Green.  Plots/prints diagnostics along the way."""
    rgrid = np.linspace(R1,R2,gridpts)
    rsqrgrid = rgrid**2
    rdiffgrid = np.diff(rgrid)
    RgMgrid = sp.spherical_jn(n, k*rgrid) #the argument for radial part of spherical waves is kr
    ImMgrid = sp.spherical_yn(n, k*rgrid)
    RgMgrid = RgMgrid.astype(np.complex)
    ImMgrid = ImMgrid.astype(np.complex)
    # Seed the Krylov basis with the normalized regular wave RgM.
    vecMgrid = RgMgrid / np.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid,rdiffgrid))
    rgrid_Mmn_plot(vecMgrid, rgrid)
    unitMvecs = [vecMgrid]
    for i in range(1,vecnum):
        newvecMgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs[-1])
        newvecMgrid[:] = np.real(newvecMgrid)
        print('before orthogonalization and normalization:')
        rgrid_Mmn_plot(newvecMgrid, rgrid)
        # Gram-Schmidt against all previous basis vectors.
        for j in range(len(unitMvecs)):
            unitMvec = unitMvecs[j]
            coeff = rgrid_Mmn_vdot(unitMvec, newvecMgrid, rsqrgrid,rdiffgrid)
            newvecMgrid -= coeff*unitMvec
        newvecMgrid /= np.sqrt(rgrid_Mmn_normsqr(newvecMgrid, rsqrgrid,rdiffgrid))
        rgrid_Mmn_plot(newvecMgrid, rgrid)
        print(rgrid_Mmn_vdot(RgMgrid, newvecMgrid, rsqrgrid,rdiffgrid))
        unitMvecs.append(newvecMgrid)
    # Project the Green's operator onto the basis.
    Green = np.zeros((vecnum,vecnum), dtype=np.complex)
    for i in range(vecnum):
        for j in range(vecnum):
            GMjgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs[j])
            Green[i,j] = rgrid_Mmn_vdot(unitMvecs[i],GMjgrid, rsqrgrid,rdiffgrid)
    print(Green)
    Umat = np.eye(vecnum)*invchi - Green
    return Green, Umat
def shell_Green_grid_Arnoldi_Mmn_step(n,k, invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=False):
    """
    this method does one more Arnoldi step, given existing Arnoldi vectors in unitMvecs
    the last entry in unitMvecs is G*unitMvecs[-2] without orthogonalization and normalization
    so len(unitMvecs) = len(Gmat)+1 going in and going out of the method
    this is setup for most efficient iteration since G*unitMvec is only computed once
    the unitMvecs list is modified on spot; a new enlarged Gmat nparray is returned at the end
    """
    #first, begin by orthogonalizing and normalizing unitMvecs[-1]
    #use relation U = V^{-1} - G
    """
    see comment for analogous method for N waves, shell_Green_grid_Arnoldi_Nmn_step
    coef1 = Gmat[-1,-1]
    unitMvecs[-1] -= coef1*unitMvecs[-2]
    if Gmat.shape[0]>1: #since G has symmetric Arnoldi representation (so tridiagonal), G*M_j has non-zero overlap with M_j and M_{j-1}
        coef2 = Gmat[-2,-1]
        unitMvecs[-1] -= coef2*unitMvecs[-3]
    unitMvecs[-1][:] = np.real(unitMvecs[-1][:])
    """
    vecnum = Gmat.shape[0]
    # Full Gram-Schmidt (not just the two tridiagonal terms) to counter
    # discretization-induced loss of orthogonality; see the note above.
    for i in range(vecnum):
        coef = rgrid_Mmn_vdot(unitMvecs[i], unitMvecs[-1], rsqrgrid,rdiffgrid)
        unitMvecs[-1] -= coef*unitMvecs[i]
    unitMvecs[-1][:] = np.real(unitMvecs[-1][:])
    norm = np.sqrt(rgrid_Mmn_normsqr(unitMvecs[-1], rsqrgrid,rdiffgrid))
    unitMvecs[-1] /= norm
    if plotVectors:
        rgrid_Mmn_plot(unitMvecs[-1], rgrid)
    #get new vector
    newvecM = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, unitMvecs[-1])
    newvecM[:] = np.real(newvecM)
    # Grow Gmat by one row/column; only the (sym.) tridiagonal entries are set.
    newGmat = np.zeros((Gmat.shape[0]+1,Gmat.shape[1]+1), dtype=np.complex)
    newGmat[:-1,:-1] = Gmat[:,:]
    newGmat[-1,-1] = rgrid_Mmn_vdot(unitMvecs[-1], newvecM, rsqrgrid,rdiffgrid)
    newGmat[-2,-1] = rgrid_Mmn_vdot(unitMvecs[-2], newvecM, rsqrgrid,rdiffgrid)
    newGmat[-1,-2] = newGmat[-2,-1]
    unitMvecs.append(newvecM) #append to end of unitMvecs for next round of iteration
    return newGmat
def shell_Green_grid_Arnoldi_Mmn_Uconverge(n,k,R1,R2, invchi, gridpts=1000, Unormtol=1e-10, veclim=3, delveclim=2, plotVectors=False):
    """Grow the M-wave Arnoldi basis (by delveclim vectors at a time) until the
    norm of the first column of (invchi*I - G)^{-1} changes by less than a
    relative Unormtol; returns (RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat),
    where Uinv holds invchi*I - G at the final basis size."""
    rgrid = np.linspace(R1,R2,gridpts)
    rsqrgrid = rgrid**2
    rdiffgrid = np.diff(rgrid)
    RgMgrid = sp.spherical_jn(n, k*rgrid) #the argument for radial part of spherical waves is kr
    ImMgrid = sp.spherical_yn(n, k*rgrid)
    RgMgrid = RgMgrid.astype(np.complex)
    ImMgrid = ImMgrid.astype(np.complex)
    # First basis vector: normalized regular wave.
    vecMgrid = RgMgrid / np.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid,rdiffgrid))
    unitMvecs = [vecMgrid]
    if plotVectors:
        rgrid_Mmn_plot(vecMgrid, rgrid)
    GvecMgrid = shell_Green_grid_Mmn_vec(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, vecMgrid)
    Gmat = np.array([[rgrid_Mmn_vdot(vecMgrid, GvecMgrid, rsqrgrid,rdiffgrid)]], dtype=np.complex)
    Uinv = invchi*np.eye(1)-Gmat
    unitMvecs.append(GvecMgrid) #append unorthogonalized, unnormalized Arnoldi vector for further iterations
    prevUnorm = 1.0/Uinv[0,0]
    i=1
    while i<veclim:
        Gmat = shell_Green_grid_Arnoldi_Mmn_step(n,k,invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=plotVectors)
        i += 1
        if i==veclim:
            #solve for first column of U and see if its norm has converged
            Uinv = invchi*np.eye(Gmat.shape[0])-Gmat
            b = np.zeros((Uinv.shape[0],1))
            b[0] = 1.0
            x = np.linalg.solve(Uinv,b)
            Unorm = np.linalg.norm(x)
            print('Unorm:', Unorm)
            if np.abs(Unorm-prevUnorm) > np.abs(Unorm)*Unormtol:
                # Not converged yet: extend the target basis size and continue.
                veclim += delveclim
                prevUnorm = Unorm
    return RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat
def rgrid_Nmn_dot(vecB1grid, vecP1grid, vecB2grid, vecP2grid, rsqrgrid, rdiffgrid):
    """Unconjugated inner product of two N-type (B, P) radial profiles:
    \int (B1 B2 + P1 P2) r^2 dr."""
    weighted = (vecB1grid * vecB2grid + vecP1grid * vecP2grid) * rsqrgrid
    return grid_integrate_trap(weighted, rdiffgrid)
def rgrid_Nmn_vdot(vecB1grid, vecP1grid, vecB2grid, vecP2grid, rsqrgrid, rdiffgrid):
    """Conjugated inner product of two N-type (B, P) radial profiles:
    \int (conj(B1) B2 + conj(P1) P2) r^2 dr."""
    weighted = (np.conj(vecB1grid) * vecB2grid + np.conj(vecP1grid) * vecP2grid) * rsqrgrid
    return grid_integrate_trap(weighted, rdiffgrid)
def rgrid_Nmn_normsqr(vecBgrid, vecPgrid, rsqrgrid, rdiffgrid):
    """Squared norm of an N-type (B, P) radial profile."""
    selfdot = rgrid_Nmn_vdot(vecBgrid, vecPgrid, vecBgrid, vecPgrid, rsqrgrid, rdiffgrid)
    return selfdot.real
def rgrid_Nmn_plot(vecBgrid,vecPgrid, rgrid):
    # Diagnostic plot: real and imaginary parts of the B and P components
    # of an N-type radial profile over the shell grid.
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4,figsize=(10,4))
    ax1.plot(rgrid,np.real(vecBgrid))
    ax2.plot(rgrid,np.real(vecPgrid))
    ax3.plot(rgrid,np.imag(vecBgrid))
    ax4.plot(rgrid,np.imag(vecPgrid))
    ax1.set_title('B real'); ax2.set_title('P real'); ax3.set_title('B imag'); ax4.set_title('P imag')
    plt.show()
def shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, vecBgrid,vecPgrid):
    """
    evaluates G(r,r')*vecN(r') over a shell region from R1 to R2
    the region coordinates are contained in rsqrgrid, a grid of r^2, and rdiffgrid, the distances between neighboring grid points; these instead of the original rgrid are given so that they only need to be computed once in main Arnoldi method
    """
    #rsqrgrid = rgrid**2
    #rdiffgrid = np.diff(rgrid)
    RgNvecNrsqr_grid = (RgBgrid*vecBgrid+RgPgrid*vecPgrid)*rsqrgrid
    # Imaginary part: k^3 * (full-shell overlap of RgN with vecN) * RgN(r).
    imfac = k**3 * grid_integrate_trap(RgNvecNrsqr_grid, rdiffgrid)
    Im_newvecBgrid = imfac * RgBgrid
    Im_newvecPgrid = imfac * RgPgrid
    # Real part, inner contribution: running integral from R1 up to each r
    # (the r' < r portion of the kernel).
    Re_ImNfactgrid = np.zeros_like(rsqrgrid, dtype=np.complex)
    Re_ImNfactgrid[1:] = k**3 * np.cumsum((RgNvecNrsqr_grid[:-1]+RgNvecNrsqr_grid[1:])*rdiffgrid/2.0)
    rev_ImNvecNrsqr_grid = np.flip((ImBgrid*vecBgrid + ImPgrid*vecPgrid) * rsqrgrid) #reverse the grid direction to evaluate integrands of the form kr' to kR2
    # Real part, outer contribution: running integral from each r up to R2,
    # built on the reversed grid and flipped back.
    Re_RgNfactgrid = np.zeros_like(rsqrgrid, dtype=np.complex)
    Re_RgNfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImNvecNrsqr_grid[:-1]+rev_ImNvecNrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
    Re_newvecBgrid = -ImBgrid*Re_ImNfactgrid - RgBgrid*Re_RgNfactgrid
    Re_newvecPgrid = -ImPgrid*Re_ImNfactgrid - RgPgrid*Re_RgNfactgrid - vecPgrid #last term is delta contribution
    return Re_newvecBgrid + 1j*Im_newvecBgrid, Re_newvecPgrid + 1j*Im_newvecPgrid
def shell_Green_grid_Arnoldi_Nmn_oneshot(n,k,R1,R2, invchi, vecnum, gridpts=200):
    """Build a fixed-size (vecnum) orthonormal Arnoldi basis of N-type waves
    on the shell [R1, R2]; return (Green, Umat = invchi*I - Green).  Also
    inspects Umat's spectrum and plots eigenvectors whose eigenvalue has
    imaginary part close to Im(invchi)."""
    rgrid = np.linspace(R1,R2,gridpts)
    rsqrgrid = rgrid**2
    rdiffgrid = np.diff(rgrid)
    RgBgrid = sp.spherical_jn(n, k*rgrid)/(k*rgrid) + sp.spherical_jn(n,k*rgrid,derivative=True) #the argument for radial part of spherical waves is kr
    RgPgrid = np.sqrt(n*(n+1))*sp.spherical_jn(n, k*rgrid)/(k*rgrid)
    ImBgrid = sp.spherical_yn(n, k*rgrid)/(k*rgrid) + sp.spherical_yn(n,k*rgrid,derivative=True)
    ImPgrid = np.sqrt(n*(n+1))*sp.spherical_yn(n, k*rgrid)/(k*rgrid)
    RgBgrid = RgBgrid.astype(np.complex)
    RgPgrid = RgPgrid.astype(np.complex)
    ImBgrid = ImBgrid.astype(np.complex)
    ImPgrid = ImPgrid.astype(np.complex)
    # Seed the basis with the normalized regular wave (RgB, RgP).
    normvec = np.sqrt(rgrid_Nmn_normsqr(RgBgrid,RgPgrid, rsqrgrid,rdiffgrid))
    vecBgrid = RgBgrid / normvec
    vecPgrid = RgPgrid / normvec
    rgrid_Nmn_plot(vecBgrid,vecPgrid, rgrid)
    unitBvecs = [vecBgrid]; unitPvecs = [vecPgrid]
    for i in range(1,vecnum):
        newvecBgrid, newvecPgrid = shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs[-1],unitPvecs[-1])
        newvecBgrid[:] = np.real(newvecBgrid)
        newvecPgrid[:] = np.real(newvecPgrid)
        print('before orthogonalization and normalization:')
        rgrid_Nmn_plot(newvecBgrid,newvecPgrid, rgrid)
        # Gram-Schmidt against all previous basis vectors.
        for j in range(len(unitBvecs)):
            unitBvec = unitBvecs[j]; unitPvec = unitPvecs[j]
            coeff = rgrid_Nmn_vdot(unitBvec,unitPvec, newvecBgrid,newvecPgrid, rsqrgrid,rdiffgrid)
            newvecBgrid -= coeff*unitBvec; newvecPgrid -= coeff*unitPvec
        normvec = np.sqrt(rgrid_Nmn_normsqr(newvecBgrid,newvecPgrid, rsqrgrid,rdiffgrid))
        newvecBgrid /= normvec; newvecPgrid /= normvec
        rgrid_Nmn_plot(newvecBgrid,newvecPgrid, rgrid)
        #print(rgrid_Mmn_vdot(RgMgrid, newvecMgrid, rsqrgrid,rdiffgrid))
        unitBvecs.append(newvecBgrid); unitPvecs.append(newvecPgrid)
    # Project the Green's operator onto the basis.
    Green = np.zeros((vecnum,vecnum), dtype=np.complex)
    for i in range(vecnum):
        for j in range(vecnum):
            GNj_Bgrid, GNj_Pgrid = shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs[j],unitPvecs[j])
            Green[i,j] = rgrid_Nmn_vdot(unitBvecs[i],unitPvecs[i], GNj_Bgrid,GNj_Pgrid, rsqrgrid,rdiffgrid)
    #print(Green)
    Umat = np.eye(vecnum)*invchi - Green
    # Spectral diagnostics on Umat.
    Umatw,Umatv = np.linalg.eig(Umat)
    print(Umatw)
    print('v0', Umatv[:,0])
    for i in range(len(Umatw)):
        #if np.abs(Umatw[i]-1-invchi)<1e-2*np.abs(1+invchi):
        if np.abs(np.imag(Umatw[i])-np.imag(invchi))<1e-4*np.abs(np.imag(invchi)):
            print(Umatw[i])
            print('v', Umatv[:,i])
            # Reassemble the selected eigenvector on the radial grid.
            testvecB = np.zeros_like(unitBvecs[0],dtype=np.complex)
            testvecP = np.zeros_like(unitPvecs[0],dtype=np.complex)
            for j in range(vecnum):
                testvecB += Umatv[j,i]*unitBvecs[j]
                testvecP += Umatv[j,i]*unitPvecs[j]
            rgrid_Nmn_plot(testvecB,testvecP,rgrid)
            rgrid_Nmn_plot(ImBgrid,ImPgrid,rgrid)
            print(rgrid_Nmn_vdot(testvecB,testvecP,ImBgrid,ImPgrid,rsqrgrid,rdiffgrid))
    return Green, Umat
def shell_Green_grid_Arnoldi_Nmn_step(n,k,invchi, rgrid,rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs,unitPvecs, Gmat, plotVectors=False):
    """
    this method does one more Arnoldi step, given existing N-type Arnoldi vectors stored in (unitBvecs, unitPvecs)
    the last entry in unitB/Pvecs is G*unitNvecs[-2] without orthogonalization and normalization
    so len(unitBvecs) = len(Gmat)+1 going in and going out of the method
    this is setup for most efficient iteration since G*unitNvec is only computed once
    the unitNvecs lists is modified on spot; a new enlarged Gmat nparray is returned at the end
    """
    #first, begin by orthogonalizing and normalizing unitMvecs[-1]
    #use relation U = V^{-1} - G
    """
    it seems that when using grid based discretization, discretization error pushes the Arnoldi process away
    from true tridiagonality; there is small non-zero values in Gmat off the tri-diagonal.
    We take a middle ground: ignore the non-tridiagonal parts of Gmat due to discretization error,
    but when orthogonalizing the Arnoldi vectors apply all previous vectors instead of just the closest two,
    to maintain orthogonality up to eps for the Arnoldi vectors in the grid representation
    coef1 = Gmat[-1,-1]
    unitBvecs[-1] -= coef1*unitBvecs[-2]; unitPvecs[-1] -= coef1*unitPvecs[-2]
    if Gmat.shape[0]>1: #since G has symmetric Arnoldi representation (so tridiagonal), G*N_j has non-zero overlap with N_j and N_{j-1}
        coef2 = Gmat[-2,-1]
        unitBvecs[-1] -= coef2*unitBvecs[-3]; unitPvecs[-1] -= coef2*unitPvecs[-3]
    unitBvecs[-1][:] = np.real(unitBvecs[-1][:]); unitPvecs[-1][:] = np.real(unitPvecs[-1][:])
    """
    vecnum = Gmat.shape[0]
    # Full Gram-Schmidt against every previous vector (see note above).
    for i in range(vecnum):
        coef = rgrid_Nmn_vdot(unitBvecs[i],unitPvecs[i], unitBvecs[-1],unitPvecs[-1], rsqrgrid,rdiffgrid)
        unitBvecs[-1] -= coef*unitBvecs[i]; unitPvecs[-1] -= coef*unitPvecs[i]
    unitBvecs[-1][:] = np.real(unitBvecs[-1][:]); unitPvecs[-1][:] = np.real(unitPvecs[-1][:])
    norm = np.sqrt(rgrid_Nmn_normsqr(unitBvecs[-1],unitPvecs[-1], rsqrgrid,rdiffgrid))
    unitBvecs[-1] /= norm; unitPvecs[-1] /= norm
    if plotVectors:
        rgrid_Nmn_plot(unitBvecs[-1],unitPvecs[-1], rgrid)
    #get new vector
    newvecB,newvecP = shell_Green_grid_Nmn_vec(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs[-1],unitPvecs[-1])
    newvecB[:] = np.real(newvecB); newvecP[:] = np.real(newvecP)
    # Grow Gmat by one row/column; only the (sym.) tridiagonal entries are set.
    newGmat = np.zeros((Gmat.shape[0]+1,Gmat.shape[1]+1), dtype=np.complex)
    newGmat[:-1,:-1] = Gmat[:,:]
    newGmat[-1,-1] = rgrid_Nmn_vdot(unitBvecs[-1],unitPvecs[-1], newvecB,newvecP, rsqrgrid,rdiffgrid)
    newGmat[-2,-1] = rgrid_Nmn_vdot(unitBvecs[-2],unitPvecs[-2], newvecB,newvecP, rsqrgrid,rdiffgrid)
    newGmat[-1,-2] = newGmat[-2,-1]
    unitBvecs.append(newvecB); unitPvecs.append(newvecP) #append to end of unitB/Pvecs for next round of iteration
    return newGmat
def shell_Green_grid_Arnoldi_Nmn_Uconverge(n,k,R1,R2, invchi, gridpts=1000, Unormtol=1e-10, veclim=3, delveclim=2, plotVectors=False):
rgrid = np.linspace(R1,R2,gridpts)
rsqrgrid = rgrid**2
rdiffgrid = np.diff(rgrid)
RgBgrid = sp.spherical_jn(n, k*rgrid)/(k*rgrid) + sp.spherical_jn(n,k*rgrid,derivative=True) #the argument for radial part of spherical waves is kr
RgPgrid = | np.sqrt(n*(n+1)) | numpy.sqrt |
# Compatibility Python 3
# Import project files
import utils_data
# Import External Packages
import numpy as np
import math
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# torch packages
import torch
from torch.autograd import Variable
from model_general_nn import predict_nn
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
import copy # for copying models in ensembleNN
class LeastSquares:
    '''
    fits gathered data to the form
    x_(t+1) = Ax + Bu
    .train() to train the fit
    .predict() to predict the next state from the current state and inputs
    '''
    def __init__(self, dt_x, x_dim = 12, u_dim = 4):
        # dt_x scales the regressed state delta back to a time step.
        self.reg = linear_model.LinearRegression()
        self.x_dim = x_dim
        self.u_dim = u_dim
        self.dt_x = dt_x
    def train(self, change_states, states_prev, actions_prev):
        # need to make sure data here is normalized AND the states are all
        # formed as change of state, rather than arbitrary values (this makes
        # sure that the features are not fit to large values)
        # Xw = y
        # this works if we stack an array
        # z = [x, u]
        # then can make a matrix
        # w ~ [A, B]
        Z = np.hstack([states_prev, actions_prev])
        y = change_states
        self.reg.fit(Z,y)
        return self.reg.coef_
    def predict(self, state, action):
        # predicts next state of a state, action pairing
        # forces one dimensional vector, transpose to align with .fit dimensions
        vect = np.hstack((state, action)).reshape(-1,1).T
        pred = self.reg.predict(vect)
        # scale the regressed delta by the time step
        return self.dt_x*pred[0]
    @property
    def A_B(self):
        # function that prints a readable form
        # NOTE(review): placeholder — prints instead of returning the fitted
        # [A, B]; implement or raise NotImplementedError.
        print('Not Implemented lol')
class NeuralNet(nn.Module):
    """Feed-forward dynamics model trained on normalized state/input deltas.

    Parameters
    ----------
    layer_sizes : list of int
        Layer widths; first entry is the input dimension, last the output.
    layer_types : list of str
        Activation spec per layer ('nn.Linear()' or 'nn.ReLU()'); the first
        layer is linear regardless.
    dynam : object
        Dynamics definition providing x_dict / u_dict metadata.
    state_learn_list, input_learn_list : list of str
        State variables / inputs the model will learn and predict.

    Each angle state is learned as a (sin, cos) pair, so it consumes two
    slots of the input and output layers.
    """
    def __init__(self, layer_sizes, layer_types, dynam, state_learn_list, input_learn_list):
        super(NeuralNet, self).__init__()

        # Track data scaling at all times so raw states can be mapped
        # to/from the network's normalized space.
        self.scalarX = MinMaxScaler(feature_range=(-1, 1))
        self.scalarU = MinMaxScaler(feature_range=(-1, 1))
        self.scalardX = MinMaxScaler(feature_range=(-1, 1))

        # list of states and inputs to learn dynamics from
        self.state_learn_list = state_learn_list
        self.input_learn_list = input_learn_list

        # dynam file for reference
        self.dynam = dynam

        if (len(layer_sizes) != len(layer_types)):
            raise ValueError('Number of layer sizes does not match number of layer types passed.')

        # Each angle state contributes (sin, cos), widening the input layer.
        num_angles = 0
        for state in state_learn_list:
            key = dynam.x_dict[state]
            if key[1] == 'angle':
                num_angles += 1

        if ((len(state_learn_list) + len(input_learn_list) + num_angles) != layer_sizes[0]):
            raise ValueError('Dimension of states and inputs to learn from does not match the first layer dimension.')

        # Add linear layers with activations. Module names must be unique:
        # add_module() with a repeated name silently replaces the earlier
        # module.
        l = 0  # module label iterator
        for i in range(len(layer_sizes) - 1):
            # add linear layers of size [n_in, n_out]
            self.add_module(str(l), nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            l += 1
            # for all but purely-linear layers, add the activation function
            if (layer_types[i] != 'nn.Linear()'):
                if (layer_types[i] == 'nn.ReLU()'):
                    self.add_module(str(l), nn.LeakyReLU())
                    # BUG FIX: was `l =+ 1` (i.e. l = 1), which reused module
                    # names and silently dropped layers from the network.
                    l += 1
                else:
                    raise ValueError('Layer Type Not Implemented')

    def forward(self, x):
        """Apply every registered module in order (a copy of nn.Sequential)."""
        for module in self._modules.values():
            x = module(x)
        return x

    def preprocess(self, X, U):
        """Convert raw rollouts (X, U) into normalized (input, target) pairs.

        X.shape = (num_iter, sequence_len, x_dim) and
        U.shape = (num_iter, sequence_len, u_dim), as produced by
        dynamics.generate_data. Returns a list of (input, output)
        torch.Tensor tuples.
        """
        # Target is the per-step change of state.
        dX = np.array([utils_data.states2delta(val) for val in X])

        # Ignore last element of X and U sequences because we do not see
        # the next state for them.
        X = X[:, :-1, :]
        U = U[:, :-1, :]

        # Slice out only the learned states/inputs; angles become (sin, cos).
        modX = []
        moddX = []
        modU = []
        for i in range(np.shape(X)[0]):
            seqX = X[i, :, :]
            seqdX = dX[i, :, :]
            seqU = U[i, :, :]

            arr_X = []
            arr_dX = []
            arr_U = []
            for state in self.state_learn_list:
                key = self.dynam.x_dict[state]
                if (key[1] != 'angle'):
                    arr_X.append(seqX[:, key[0]])
                    arr_dX.append(seqdX[:, key[0]])
                else:
                    arr_X.append(np.sin(seqX[:, key[0]]))
                    arr_X.append(np.cos(seqX[:, key[0]]))
                    arr_dX.append(np.sin(seqdX[:, key[0]]))
                    arr_dX.append(np.cos(seqdX[:, key[0]]))

            for inp in self.input_learn_list:
                key = self.dynam.u_dict[inp]
                arr_U.append(seqU[:, key[0]])

            modX.append(arr_X)
            moddX.append(arr_dX)
            modU.append(arr_U)

        modX = np.array(modX)
        moddX = np.array(moddX)
        modU = np.array(modU)

        # swap axes: (iter, dim, time) -> (iter, time, dim) for easy flatten
        modX = np.swapaxes(modX, 1, 2)
        moddX = np.swapaxes(moddX, 1, 2)
        modU = np.swapaxes(modU, 1, 2)

        # Flatten so rows look like (input, output) pairs.
        modX = modX.reshape(modX.shape[0] * modX.shape[1], -1)
        modU = modU.reshape(modU.shape[0] * modU.shape[1], -1)
        moddX = moddX.reshape(moddX.shape[0] * moddX.shape[1], -1)

        if moddX.shape != modX.shape:
            raise ValueError('Something went wrong, modified X shape:' + str(modX.shape) + ' dX shape:' + str(moddX.shape))

        # update mean and variance of the dataset with each training pass
        self.scalarX.partial_fit(modX)
        self.scalarU.partial_fit(modU)
        self.scalardX.partial_fit(moddX)

        # Normalize into the scalers' feature range.
        normX = self.scalarX.transform(modX)
        normU = self.scalarU.transform(modU)
        normdX = self.scalardX.transform(moddX)

        print(np.shape(normX))
        print(np.shape(normU))
        print(np.shape(normdX))

        inputs = torch.Tensor(np.concatenate((normX, normU), axis=1))
        outputs = torch.Tensor(normdX)

        return list(zip(inputs, outputs))

    def postprocess(self, dX):
        """Rescale raw network output to state units.

        Undoes the training normalization, then converts each learned
        angle's (sin, cos) pair back to a single angle via arctan2.
        """
        # de-normalize so to say
        dX = self.scalardX.inverse_transform(dX.reshape(1, -1))
        dX = dX.ravel()

        def NNout2State(dX):
            # Map one output row back to state variables, consuming two
            # slots (sin, cos) per angle state.
            out = []
            l = 0
            for (i, state) in enumerate(self.state_learn_list):
                key = self.dynam.x_dict[state]
                if (key[1] != 'angle'):
                    out.append(dX[i + l])
                else:
                    out.append(np.arctan2(dX[i + l], dX[1 + i + l]))
                    l += 1
            return out

        # Handle either a batch of rows or a single flat prediction.
        if len(np.shape(dX)) > 1:
            out = []
            for state in dX:
                out.append(NNout2State(state))
        else:
            out = NNout2State(dX)

        return np.array(out)

    def train(self, dataset, learning_rate = 1e-3, epochs=50, batch_size=50, optim="Adam", loss_fn=nn.MSELoss(), split=0.9, preprocess=True):
        """Train the neural network.

        If preprocess is True, `dataset` is the raw (X, U) output of
        generate_data; otherwise it is a list of (input, target) tensor
        tuples. `epochs` / `batch_size` are the usual hyperparameters,
        `optim` selects "Adam" or "SGD", and `split` is the train/test
        ratio. Returns the per-epoch test errors from _optimize.
        """
        if preprocess:
            dataset = self.preprocess(dataset[0], dataset[1])
            print('Shape of dataset is:', len(dataset))

        trainLoader = DataLoader(dataset[:int(split * len(dataset))], batch_size=batch_size, shuffle=True)
        testLoader = DataLoader(dataset[int(split * len(dataset)):], batch_size=batch_size)

        # Unclear if we should be using SGD or ADAM? Papers seem to say
        # ADAM works better.
        if (optim == "Adam"):
            optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        elif (optim == "SGD"):
            optimizer = torch.optim.SGD(self.parameters(), lr=learning_rate)
        else:
            raise ValueError(optim + " is not a valid optimizer type")

        return self._optimize(loss_fn, optimizer, epochs, batch_size, trainLoader, testLoader)

    def predict(self, X, U):
        """Predict the change in state for (X, U).

        Does all pre/post processing for the network. States not in
        state_learn_list get a predicted change of 0, so the result is
        always a full x_dim vector.
        """
        state_in = []
        input_in = []

        # Build the network input, expanding angles to (sin, cos).
        for state in self.state_learn_list:
            key = self.dynam.x_dict[state]
            if (key[1] != 'angle'):
                state_in.append(X[key[0]])
            else:
                state_in.append(np.sin(X[key[0]]))
                state_in.append(np.cos(X[key[0]]))

        for inp in self.input_learn_list:
            key = self.dynam.u_dict[inp]
            input_in.append(U[key[0]])

        state_in = np.array(state_in)
        input_in = np.array(input_in)

        # normalizing and converting to a single sample
        normX = self.scalarX.transform(state_in.reshape(1, -1))
        normU = self.scalarU.transform(input_in.reshape(1, -1))

        input = Variable(torch.Tensor(np.concatenate((normX, normU), axis=1)))
        NNout = self.postprocess(self.forward(input).data[0])

        # Prediction is defined as 0 for states we did not learn.
        out = np.zeros(self.dynam.x_dim)
        idx_out = 0
        for state in self.dynam.x_dict:
            key = self.dynam.x_dict[state]
            if state in self.state_learn_list:
                out[key[0]] = NNout[idx_out]
                idx_out += 1
        return out

    def _optimize(self, loss_fn, optim, epochs, batch_size, trainLoader, testLoader):
        """Inner training loop; returns the list of per-epoch test losses."""
        errors = []
        for epoch in range(epochs):
            avg_loss = Variable(torch.zeros(1))
            # BUG FIX: len(trainLoader) is already the number of batches;
            # the old code divided by batch_size again, mis-scaling avg_loss.
            num_batches = len(trainLoader)
            for i, (input, target) in enumerate(trainLoader):
                input = Variable(input)
                # the target can't carry a gradient
                target = Variable(target, requires_grad=False)
                optim.zero_grad()               # zero the gradient buffers
                output = self.forward(input)    # compute the output
                loss = loss_fn(output, target)  # compute the loss
                loss.backward()                 # backpropagate from the loss
                optim.step()                    # do a gradient descent step
                if not loss.data.numpy() == loss.data.numpy():
                    # NaN loss: report it and hand back the offending pair.
                    print("loss is NaN")
                    return output, input, loss
                avg_loss += loss.data[0] / num_batches

            test_error = 0
            for (input, target) in testLoader:  # compute the testing error
                input = Variable(input)
                target = Variable(target, requires_grad=False)
                output = self.forward(input)
                loss = loss_fn(output, target)
                test_error += loss.data[0]
            test_error = test_error / len(testLoader)

            print("Epoch:", '%04d' % (epoch + 1), "train loss=", "{:.6f}".format(avg_loss.data[0]), "test loss=", "{:.6f}".format(test_error))
            errors.append(test_error)
        return errors

    def save_model(self, filepath):
        """Save the full model (weights AND fitted scalers) to filepath."""
        torch.save(self, filepath)  # full model state

    def load_model(self, filepath):
        """Load a model saved by save_model.

        BUG FIX: save_model() stores the full model object, but the old
        code assumed a bare state_dict; accept either format.
        """
        obj = torch.load(filepath)
        if isinstance(obj, dict):
            self.load_state_dict(obj)
        else:
            self.load_state_dict(obj.state_dict())
class EnsembleNN(nn.Module):
"""
Creates an ensembleNN of parameters optimized from the standard NN
Dropout
"""
def __init__(self, base_net, num_nets):
# super(NeuralNet, self).__init__()
#To keep track of what the mean and variance are at all times for transformations
self.scalarX = StandardScaler()
self.scalarU = StandardScaler()
self.scalardX = StandardScaler()
# list of states and inputs to learn dynamics from
self.state_learn_list = base_net.state_learn_list
self.input_learn_list = base_net.input_learn_list
layer_sizes = [12, 100, 100, 9]
layer_types = ['nn.Linear()','nn.ReLU()', 'nn.ReLU()', 'nn.Linear()']
states_learn = ['yaw', 'pitch', 'roll', 'ax', 'ay', 'az']
# ['X', 'Y', 'Z', 'vx', 'vy', 'vz', 'yaw', 'pitch', 'roll', 'w_z', 'w_x', 'w_y']
forces_learn = ['Thrust', 'taux', 'tauy']
# dynam file for reference
self.dynam = base_net.dynam
# Creates list of nueral nets for processing
self.num_nets = num_nets
self.nets = []
for i in range(num_nets):
self.nets.append(copy.deepcopy(base_net))
def forward_ens(self, x):
"""
Standard forward function necessary if extending nn.Module. Basically a copy of nn.Sequential. Updated for bootstrap method to pass each net once
"""
xs = []
for net in self.nets:
x_sub = x
for module in net._modules.values():
x_sub = module(x_sub)
xs.append(x_sub)
# print(np.shape(xs))
x = torch.mean(torch.stack(xs), 0)
return x
    def preprocess(self, X, U):
        """
        Preprocess X and U (as they would be outputted by dynamics.generate_data) so they can be passed into the neural network for training
        X and U should be numpy arrays with dimensionality X.shape = (num_iter, sequence_len, 12) U.shape = (num_iter, sequence_len, 4)
        return: a list of normalized (input, output) torch.Tensor pairs
        """
        #Getting output dX — the per-step change of state for each sequence
        dX = np.array([utils_data.states2delta(val) for val in X])
        # Ignore last element of X and U sequences because do not see next state
        X = X[:,:-1,:]
        U = U[:,:-1,:]
        #translating from [psi theta phi] to [sin(psi) sin(theta) sin(phi) cos(psi) cos(theta) cos(phi)]
        # Adds the desired variables to the X data to learn; each angle state
        # is expanded into a (sin, cos) pair so it occupies two columns.
        modX = []
        moddX = []
        # Adds the desired inputs to the U data to learn X with
        modU = []
        for i in range(np.shape(X)[0]):
            seqX = X[i,:,:]
            seqdX = dX[i,:,:]
            seqU = U[i,:,:]
            # intialize empty arrays to make slicing easier
            arr_X = []
            arr_dX = []
            arr_U = []
            for state in self.state_learn_list:
                # grabs state information from dictionary
                key = self.dynam.x_dict[state]
                # concatenate required variables for states
                if (key[1] != 'angle'):
                    arr_X.append(seqX[:, key[0]])
                    arr_dX.append(seqdX[:, key[0]])
                else:
                    arr_X.append(np.sin(seqX[:, key[0]]) )
                    arr_X.append(np.cos(seqX[:, key[0]]) )
                    arr_dX.append(np.sin(seqdX[:, key[0]]))
                    arr_dX.append(np.cos(seqdX[:, key[0]]) )
            for inp in self.input_learn_list:
                # grabs state information from dictionary
                key = self.dynam.u_dict[inp]
                # concatenate required variables for states
                arr_U.append(seqU[:, key[0]])
            # append the slice onto the array
            modX.append(arr_X)
            moddX.append(arr_dX)
            modU.append(arr_U)
        # cast to numpy arrays
        modX = np.array(modX)
        moddX = np.array(moddX)
        modU = np.array(modU)
        # swap axes for easy flatten & tensor: (iter, dim, time) -> (iter, time, dim)
        modX = np.swapaxes(modX, 1, 2)
        moddX = np.swapaxes(moddX, 1, 2)
        modU = np.swapaxes(modU, 1, 2)
        #Follow by flattening the matrices so they look like input/output pairs
        modX = modX.reshape(modX.shape[0]*modX.shape[1], -1)
        modU = modU.reshape(modU.shape[0]*modU.shape[1], -1)
        # NOTE(review): the row count here uses dX's (pre-slicing) shape; it
        # matches moddX only if states2delta returns one delta per retained
        # step — confirm against utils_data.states2delta.
        moddX = moddX.reshape(dX.shape[0]*dX.shape[1], -1)
        #at this point they should look like input output pairs
        if moddX.shape != modX.shape:
            raise ValueError('Something went wrong, modified X shape:' + str(modX.shape) + ' dX shape:' + str(dX.shape))
        #update mean and variance of the dataset with each training pass
        # (partial_fit keeps running statistics across repeated calls)
        self.scalarX.partial_fit(modX)
        self.scalarU.partial_fit(modU)
        self.scalardX.partial_fit(moddX)
        #Normalizing to zero mean and unit variance
        normX = self.scalarX.transform(modX)
        normU = self.scalarU.transform(modU)
        normdX = self.scalardX.transform(moddX)
        inputs = torch.Tensor(np.concatenate((normX, normU), axis=1))
        outputs = torch.Tensor(normdX)
        # debugging
        print('Preprocessing sizes:')
        print('  ', np.shape(inputs))
        print('  ', np.shape(outputs))
        print('  ------')
        return list(zip(inputs, outputs))
def postprocess(self, dX):
"""
Given the raw output from the neural network, post process it by rescaling by the mean and variance of the dataset and converting from cos, sin
to actual angle
"""
# de-normalize so to say
dX = self.scalardX.inverse_transform(dX)
out = []
ang_idx = 0
def NNout2State(dX):
# helper function for transforming the output of the NN back to useable state information
out = []
# Again needs to remove cos/sin of the correct variables the desired variables to the X data to learn
l = 0
# grabs state information from dictionary
for (i,state) in enumerate(self.state_learn_list):
# grabs state information from dictionary
key = self.dynam.x_dict[state]
# concatenate required variables for states
if (key[1] != 'angle'):
out.append(dX[i+l])
else:
out.append(np.arctan2(dX[i+l], dX[1+i+l]))
l+= 1
return out
# Call normalization on each state predicted
if len(np.shape(dX)) > 1:
for state in dX:
out.append(NNout2State(state))
else:
out = NNout2State(dX)
return np.array(out)
# out = np.concatenate((dX[:, :6], np.arctan2(dX[:, 6:9], dX[:, 9:12]), dX[:, 12:]), axis=1)
    def train_ens(self, dataset, learning_rate = 1e-3, epochs=50, batch_size=50, optim="Adam", loss_fn=nn.MSELoss(), split=0.9, preprocess=True):
        """
        Train the neural network ensemble (each member net separately).
        if preprocess = False
            dataset is a list of tuples to train on, where the first value in the tuple is the training data (should be implemented as a torch tensor), and the second value in the tuple
            is the label/action taken
        if preprocess = True
            dataset is simply the raw output of generate data (X, U)
        Epochs is number of times to train on given training data,
        batch_size is hyperparameter dicating how large of a batch to use for training,
        optim is the optimizer to use (options are "Adam", "SGD")
        split is train/test split ratio
        Returns a list of per-net error lists from _optimize.
        """
        if preprocess:
            dataset = self.preprocess(dataset[0], dataset[1])
            print('Length of dataset is:', len(dataset))
        num_samples = len(dataset)
        # NOTE(review): weights only cover the train split, yet num_samples
        # draws are requested, so each epoch resamples the train split with
        # replacement (bootstrap-style oversampling) — confirm intended.
        weights = (1/(num_samples+1))*np.ones((int(split*num_samples)))
        # TODO: Update datasets by sampling with replacement for each net
        # Make random sampling with replacement by using a evenly weighted random sampler with replacement
        sampler = WeightedRandomSampler(weights, num_samples, replacement=True)
        # Training loader has the sampler, testing does not matter.
        trainLoader = DataLoader(dataset[:int(split*len(dataset))], sampler = sampler, batch_size=batch_size)
        testLoader = DataLoader(dataset[int(split*len(dataset)):], batch_size=batch_size)
        # TODO: Train each net separately
        #Unclear if we should be using SGD or ADAM? Papers seem to say ADAM works better
        # train each net
        errors = []
        for i, net in enumerate(self.nets):
            # super(NeuralNet, net).parameters() resolves to the plain
            # nn.Module parameters of each member net (which is a NeuralNet)
            if(optim=="Adam"):
                optimizer = torch.optim.Adam(super(NeuralNet, net).parameters(), lr=learning_rate)
            elif(optim=="SGD"):
                optimizer = torch.optim.SGD(super(NeuralNet, net).parameters(), lr=learning_rate)
            else:
                raise ValueError(optim + " is not a valid optimizer type")
            print('Training net ', i+1)
            error = net._optimize(loss_fn, optimizer, epochs, batch_size, trainLoader, testLoader)
            errors.append(error)
            print('-------------------------------------------------------')
        print(np.shape(errors))
        return errors
def predict(self, X, U):
"""
Given a state X and input U, predict the change in state dX. This function does all pre and post processing for the neural net
"""
#Converting to sin/cos form
# state_in = np.concatenate((X[0:6], np.sin(X[6:9]), np.cos(X[6:9]), X[9:]))
state_in = []
input_in = []
for state in self.state_learn_list:
# grabs state information from dictionary
key = self.dynam.x_dict[state]
# concatenate required variables for states
if (key[1] != 'angle'):
state_in.append(X[key[0]])
else:
state_in.append(np.sin(X[key[0]]) )
state_in.append(np.cos(X[key[0]]) )
for inp in self.input_learn_list:
# grabs state information from dictionary
key = self.dynam.u_dict[inp]
# concatenate required variables for states
input_in.append(U[key[0]])
# make numpy array
state_in = | np.array(state_in) | numpy.array |
import os
import warnings
from six import BytesIO
from six.moves import cPickle
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import pandas as pd
import pandas.util.testing as tm
import pytest
from sm2 import datasets
from sm2.regression.linear_model import OLS
from sm2.tsa.arima_model import AR, ARMA, ARIMA
from sm2.tsa.arima_process import arma_generate_sample
from sm2.tools.sm_exceptions import MissingDataError
from .results import results_arma, results_arima
# Comparison precisions used by the Check*Mixin classes below.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1

# Pre-simulated ARMA series used as endog by the Test_Y_* classes; each
# column corresponds to one (p, q)/trend combination.
current_path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_path, 'results', 'y_arma_data.csv')
y_arma = pd.read_csv(path, float_precision='high').values

# Date indexes for the macro (cpi) and sunspot datasets.
# pd.period_range replaces the deprecated PeriodIndex(start=..., end=...)
# constructor (removed in pandas 1.0); the resulting index is identical.
cpi_dates = pd.period_range(start='1959q1', end='2009q3', freq='Q')
sun_dates = pd.period_range(start='1700', end='2008', freq='A')
cpi_predict_dates = pd.period_range(start='2009q3', end='2015q4', freq='Q')
sun_predict_dates = pd.period_range(start='2008', end='2033', freq='A')
@pytest.mark.not_vetted
@pytest.mark.skip(reason="fa, Arma not ported from upstream")
def test_compare_arma():
    """Preliminary comparison of arma_kf, arma_cond_ls and arma_cond_mle.

    Skipped until fftarma (fa) and Arma are ported from upstream; the
    dummies below only keep flake8 quiet.
    """
    # dummies to avoid flake8 warnings until porting
    fa = None
    Arma = None
    # import statsmodels.sandbox.tsa.fftarma as fa
    # from statsmodels.tsa.arma_mle import Arma
    # this is a preliminary test to compare
    # arma_kf, arma_cond_ls and arma_cond_mle
    # the results returned by the fit methods are incomplete
    # for now without random.seed
    np.random.seed(9876565)
    famod = fa.ArmaFft([1, -0.5], [1., 0.4], 40)
    x = famod.generate_sample(nsample=200, burnin=1000)
    modkf = ARMA(x, (1, 1))
    reskf = modkf.fit(trend='nc', disp=-1)
    dres = reskf
    modc = Arma(x)
    resls = modc.fit(order=(1, 1))
    rescm = modc.fit_mle(order=(1, 1), start_params=[0.4, 0.4, 1.], disp=0)
    # decimal 1 corresponds to threshold of 5% difference
    # still different sign  corrected
    assert_almost_equal(resls[0] / dres.params,
                        np.ones(dres.params.shape),
                        decimal=1)
    # TODO: Is the next comment still accurate.  It is retained from upstream
    # where there was a commented-out assertion after the comment
    # rescm also contains variance estimate as last element of params
    assert_almost_equal(rescm.params[:-1] / dres.params,
                        np.ones(dres.params.shape),
                        decimal=1)
@pytest.mark.not_vetted
class CheckArmaResultsMixin(object):
    """
    res2 are the results from gretl.  They are in results/results_arma.
    res1 are from sm2

    Each decimal_* class attribute sets the comparison precision for the
    statistic tested just below it; subclasses override them where the
    reference values agree less closely.
    """
    decimal_params = DECIMAL_4

    def test_params(self):
        assert_almost_equal(self.res1.params,
                            self.res2.params,
                            self.decimal_params)

    decimal_aic = DECIMAL_4

    def test_aic(self):
        assert_almost_equal(self.res1.aic,
                            self.res2.aic,
                            self.decimal_aic)

    decimal_bic = DECIMAL_4

    def test_bic(self):
        assert_almost_equal(self.res1.bic,
                            self.res2.bic,
                            self.decimal_bic)

    decimal_arroots = DECIMAL_4

    def test_arroots(self):
        assert_almost_equal(self.res1.arroots,
                            self.res2.arroots,
                            self.decimal_arroots)

    decimal_maroots = DECIMAL_4

    def test_maroots(self):
        assert_almost_equal(self.res1.maroots,
                            self.res2.maroots,
                            self.decimal_maroots)

    decimal_bse = DECIMAL_2

    def test_bse(self):
        assert_almost_equal(self.res1.bse,
                            self.res2.bse,
                            self.decimal_bse)

    decimal_cov_params = DECIMAL_4

    def test_covparams(self):
        assert_almost_equal(self.res1.cov_params(),
                            self.res2.cov_params,
                            self.decimal_cov_params)

    decimal_hqic = DECIMAL_4

    def test_hqic(self):
        assert_almost_equal(self.res1.hqic,
                            self.res2.hqic,
                            self.decimal_hqic)

    decimal_llf = DECIMAL_4

    def test_llf(self):
        assert_almost_equal(self.res1.llf,
                            self.res2.llf,
                            self.decimal_llf)

    decimal_resid = DECIMAL_4

    def test_resid(self):
        assert_almost_equal(self.res1.resid,
                            self.res2.resid,
                            self.decimal_resid)

    decimal_fittedvalues = DECIMAL_4

    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues,
                            self.res2.fittedvalues,
                            self.decimal_fittedvalues)

    decimal_pvalues = DECIMAL_2

    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues,
                            self.res2.pvalues,
                            self.decimal_pvalues)

    decimal_t = DECIMAL_2  # only 2 decimal places in gretl output

    def test_tvalues(self):
        assert_almost_equal(self.res1.tvalues,
                            self.res2.tvalues,
                            self.decimal_t)

    decimal_sigma2 = DECIMAL_4

    def test_sigma2(self):
        assert_almost_equal(self.res1.sigma2,
                            self.res2.sigma2,
                            self.decimal_sigma2)

    @pytest.mark.smoke
    def test_summary(self):
        # smoke test only: summary() must not raise
        self.res1.summary()
@pytest.mark.not_vetted
class CheckForecastMixin(object):
    """Checks out-of-sample forecasts and their standard errors vs res2."""
    decimal_forecast = DECIMAL_4

    def test_forecast(self):
        assert_almost_equal(self.res1.forecast_res,
                            self.res2.forecast,
                            self.decimal_forecast)

    decimal_forecasterr = DECIMAL_4

    def test_forecasterr(self):
        assert_almost_equal(self.res1.forecast_err,
                            self.res2.forecasterr,
                            self.decimal_forecasterr)
@pytest.mark.not_vetted
class CheckDynamicForecastMixin(object):
    """Checks dynamic (recursive) in-sample forecasts vs res2."""
    decimal_forecast_dyn = 4

    def test_dynamic_forecast(self):
        assert_almost_equal(self.res1.forecast_res_dyn,
                            self.res2.forecast_dyn,
                            self.decimal_forecast_dyn)

    #def test_forecasterr(self):
    #    assert_almost_equal(self.res1.forecast_err_dyn,
    #                        self.res2.forecasterr_dyn,
    #                        DECIMAL_4)
@pytest.mark.not_vetted
class CheckArimaResultsMixin(CheckArmaResultsMixin):
    """Extends the ARMA checks with ARIMA order and levels-prediction checks."""
    def test_order(self):
        assert self.res1.k_diff == self.res2.k_diff
        assert self.res1.k_ar == self.res2.k_ar
        assert self.res1.k_ma == self.res2.k_ma

    decimal_predict_levels = DECIMAL_4

    def test_predict_levels(self):
        assert_almost_equal(self.res1.predict(typ='levels'),
                            self.res2.linear,
                            self.decimal_predict_levels)
@pytest.mark.not_vetted
class Test_Y_ARMA11_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(1, 1) without constant, exact MLE; also exercises pickling."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 0]
        cls.res1 = ARMA(endog, order=(1, 1)).fit(trend='nc', disp=-1)
        fc_res, fc_err, ci = cls.res1.forecast(10)
        cls.res1.forecast_res = fc_res
        cls.res1.forecast_err = fc_err
        cls.res2 = results_arma.Y_arma11()

    # TODO: share with test_ar? other test classes?
    def test_pickle(self):
        # round-trip the wrapped results object through an in-memory pickle
        fh = BytesIO()
        # test wrapped results load save pickle
        self.res1.save(fh)
        fh.seek(0, 0)
        res_unpickled = self.res1.__class__.load(fh)
        assert type(res_unpickled) is type(self.res1)  # noqa:E721
        # TODO: Test equality instead of just type equality?
@pytest.mark.not_vetted
class Test_Y_ARMA14_NoConst(CheckArmaResultsMixin):
    """ARMA(1, 4) without constant, exact MLE."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 1]
        cls.res1 = ARMA(endog, order=(1, 4)).fit(trend='nc', disp=-1)
        cls.res2 = results_arma.Y_arma14()
@pytest.mark.not_vetted
@pytest.mark.slow
class Test_Y_ARMA41_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(4, 1) without constant, exact MLE, with forecasts."""
    decimal_maroots = DECIMAL_3

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 2]
        cls.res1 = ARMA(endog, order=(4, 1)).fit(trend='nc', disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma41()
@pytest.mark.not_vetted
class Test_Y_ARMA22_NoConst(CheckArmaResultsMixin):
    """ARMA(2, 2) without constant, exact MLE."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 3]
        cls.res1 = ARMA(endog, order=(2, 2)).fit(trend='nc', disp=-1)
        cls.res2 = results_arma.Y_arma22()
@pytest.mark.not_vetted
class Test_Y_ARMA50_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
    """AR(5) (ARMA(5, 0)) without constant, exact MLE, with forecasts."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 4]
        cls.res1 = ARMA(endog, order=(5, 0)).fit(trend='nc', disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma50()
@pytest.mark.not_vetted
class Test_Y_ARMA02_NoConst(CheckArmaResultsMixin):
    """MA(2) (ARMA(0, 2)) without constant, exact MLE."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 5]
        cls.res1 = ARMA(endog, order=(0, 2)).fit(trend='nc', disp=-1)
        cls.res2 = results_arma.Y_arma02()
@pytest.mark.not_vetted
class Test_Y_ARMA11_Const(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(1, 1) with constant, exact MLE, with forecasts."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 6]
        cls.res1 = ARMA(endog, order=(1, 1)).fit(trend="c", disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma11c()
@pytest.mark.not_vetted
class Test_Y_ARMA14_Const(CheckArmaResultsMixin):
    """ARMA(1, 4) with constant, exact MLE."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 7]
        cls.res1 = ARMA(endog, order=(1, 4)).fit(trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma14c()
@pytest.mark.not_vetted
class Test_Y_ARMA41_Const(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(4, 1) with constant, exact MLE started at the reference params."""
    decimal_cov_params = DECIMAL_3
    decimal_fittedvalues = DECIMAL_3
    decimal_resid = DECIMAL_3
    decimal_params = DECIMAL_3

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 8]
        cls.res2 = results_arma.Y_arma41c()
        cls.res1 = ARMA(endog, order=(4, 1)).fit(trend="c", disp=-1,
                                                 start_params=cls.res2.params)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
@pytest.mark.not_vetted
class Test_Y_ARMA22_Const(CheckArmaResultsMixin):
    """ARMA(2, 2) with constant, exact MLE."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 9]
        cls.res1 = ARMA(endog, order=(2, 2)).fit(trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma22c()
@pytest.mark.not_vetted
class Test_Y_ARMA50_Const(CheckArmaResultsMixin, CheckForecastMixin):
    """AR(5) with constant, exact MLE, with forecasts."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 10]
        cls.res1 = ARMA(endog, order=(5, 0)).fit(trend="c", disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma50c()
@pytest.mark.not_vetted
class Test_Y_ARMA02_Const(CheckArmaResultsMixin):
    """MA(2) with constant, exact MLE."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 11]
        cls.res1 = ARMA(endog, order=(0, 2)).fit(trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma02c()
# cov_params and tvalues are off still but not as much vs. R
@pytest.mark.not_vetted
class Test_Y_ARMA11_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(1, 1) without constant, conditional sum-of-squares estimation."""
    decimal_t = DECIMAL_1

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 0]
        cls.res1 = ARMA(endog, order=(1, 1)).fit(method="css",
                                                 trend="nc", disp=-1)
        cls.res2 = results_arma.Y_arma11("css")
# better vs. R
@pytest.mark.not_vetted
class Test_Y_ARMA14_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(1, 4) without constant, CSS estimation."""
    decimal_fittedvalues = DECIMAL_3
    decimal_resid = DECIMAL_3
    decimal_t = DECIMAL_1

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 1]
        cls.res1 = ARMA(endog, order=(1, 4)).fit(method="css",
                                                 trend="nc", disp=-1)
        cls.res2 = results_arma.Y_arma14("css")
# bse, etc. better vs. R
# maroot is off because maparams is off a bit (adjust tolerance?)
@pytest.mark.not_vetted
class Test_Y_ARMA41_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(4, 1) without constant, CSS estimation (loose tolerances)."""
    decimal_t = DECIMAL_1
    decimal_pvalues = 0
    decimal_cov_params = DECIMAL_3
    decimal_maroots = DECIMAL_1

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 2]
        cls.res1 = ARMA(endog, order=(4, 1)).fit(method="css",
                                                 trend="nc", disp=-1)
        cls.res2 = results_arma.Y_arma41("css")
# same notes as above
@pytest.mark.not_vetted
class Test_Y_ARMA22_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(2, 2) without constant, CSS estimation."""
    decimal_t = DECIMAL_1
    decimal_resid = DECIMAL_3
    decimal_pvalues = DECIMAL_1
    decimal_fittedvalues = DECIMAL_3

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 3]
        cls.res1 = ARMA(endog, order=(2, 2)).fit(method="css",
                                                 trend="nc", disp=-1)
        cls.res2 = results_arma.Y_arma22("css")
# NOTE: gretl just uses least squares for AR CSS
# so BIC, etc. is
# -2*res1.llf + np.log(nobs)*(res1.q+res1.p+res1.k)
# with no adjustment for p and no extra sigma estimate
# NOTE: so our tests use x-12 arima results which agree with us and are
# consistent with the rest of the models
@pytest.mark.not_vetted
class Test_Y_ARMA50_NoConst_CSS(CheckArmaResultsMixin):
    """AR(5) without constant, CSS estimation (compared against x-12-arima)."""
    decimal_t = 0
    decimal_llf = DECIMAL_1  # looks like rounding error?

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 4]
        cls.res1 = ARMA(endog, order=(5, 0)).fit(method="css",
                                                 trend="nc", disp=-1)
        cls.res2 = results_arma.Y_arma50("css")
@pytest.mark.not_vetted
class Test_Y_ARMA02_NoConst_CSS(CheckArmaResultsMixin):
    """MA(2) without constant, CSS estimation."""
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 5]
        cls.res1 = ARMA(endog, order=(0, 2)).fit(method="css",
                                                 trend="nc", disp=-1)
        cls.res2 = results_arma.Y_arma02("css")
# NOTE: our results are close to --x-12-arima option and R
@pytest.mark.not_vetted
class Test_Y_ARMA11_Const_CSS(CheckArmaResultsMixin):
    """ARMA(1, 1) with constant, CSS estimation."""
    decimal_params = DECIMAL_3
    decimal_cov_params = DECIMAL_3
    decimal_t = DECIMAL_1

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 6]
        cls.res1 = ARMA(endog, order=(1, 1)).fit(trend="c",
                                                 method="css", disp=-1)
        cls.res2 = results_arma.Y_arma11c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA14_Const_CSS(CheckArmaResultsMixin):
    """ARMA(1, 4) with constant, CSS estimation."""
    decimal_t = DECIMAL_1
    decimal_pvalues = DECIMAL_1

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 7]
        cls.res1 = ARMA(endog, order=(1, 4)).fit(trend="c",
                                                 method="css", disp=-1)
        cls.res2 = results_arma.Y_arma14c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA41_Const_CSS(CheckArmaResultsMixin):
    """ARMA(4, 1) with constant, CSS estimation (loose tolerances)."""
    decimal_t = DECIMAL_1
    decimal_cov_params = DECIMAL_1
    decimal_maroots = DECIMAL_3
    decimal_bse = DECIMAL_1

    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 8]
        cls.res1 = ARMA(endog, order=(4, 1)).fit(trend="c",
                                                 method="css", disp=-1)
        cls.res2 = results_arma.Y_arma41c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA22_Const_CSS(CheckArmaResultsMixin):
    # ARMA(2, 2) with constant, conditional-sum-of-squares fit.
    decimal_t = 0
    decimal_pvalues = DECIMAL_1
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 9]
        cls.res1 = ARMA(endog, order=(2, 2)).fit(trend="c",
                                                 method="css", disp=-1)
        cls.res2 = results_arma.Y_arma22c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA50_Const_CSS(CheckArmaResultsMixin):
    # AR(5) with constant, conditional-sum-of-squares fit.
    decimal_t = DECIMAL_1
    decimal_params = DECIMAL_3
    decimal_cov_params = DECIMAL_2
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 10]
        cls.res1 = ARMA(endog, order=(5, 0)).fit(trend="c",
                                                 method="css", disp=-1)
        cls.res2 = results_arma.Y_arma50c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA02_Const_CSS(CheckArmaResultsMixin):
    """MA(2) CSS fit with constant, compared to stored reference results."""

    @classmethod
    def setup_class(cls):
        series = y_arma[:, 11]
        model = ARMA(series, order=(0, 2))
        cls.res1 = model.fit(method="css", trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma02c("css")
@pytest.mark.not_vetted
class Test_ARIMA101(CheckArmaResultsMixin):
    # just make sure this works
    # ARIMA(1, 0, 1) is compared against the ARMA(1, 1)-with-constant
    # reference results.
    @classmethod
    def setup_class(cls):
        endog = y_arma[:, 6]
        cls.res1 = ARIMA(endog, (1, 0, 1)).fit(trend="c", disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma11c()
        # the reference results are plain ARMA; attach the ARIMA order info
        cls.res2.k_diff = 0
        cls.res2.k_ar = 1
        cls.res2.k_ma = 1
@pytest.mark.not_vetted
class Test_ARIMA111(CheckArimaResultsMixin, CheckForecastMixin,
                    CheckDynamicForecastMixin):
    # ARIMA(1, 1, 1) on CPI, exact MLE.
    decimal_llf = 3
    decimal_aic = 3
    decimal_bic = 3
    decimal_cov_params = 2  # this used to be better?
    decimal_t = 0
    @classmethod
    def setup_class(cls):
        cpi = datasets.macrodata.load_pandas().data['cpi'].values
        cls.res1 = ARIMA(cpi, (1, 1, 1)).fit(disp=-1)
        cls.res2 = results_arima.ARIMA111()
        # make sure endog names changes to D.cpi
        (cls.res1.forecast_res,
         cls.res1.forecast_err,
         conf_int) = cls.res1.forecast(25)
        # TODO: fix the indexing for the end here, I don't think this is right
        # if we're going to treat it like indexing
        # the forecast from 2005Q1 through 2009Q4 is indices
        # 184 through 227 not 226
        # note that the first one counts in the count so 164 + 64 is 65
        # predictions
        cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164 + 63,
                                                     typ='levels',
                                                     dynamic=True)
    def test_freq(self):
        # expected root frequencies for this fit (regression values)
        assert_almost_equal(self.res1.arfreq, [0.0000], 4)
        assert_almost_equal(self.res1.mafreq, [0.0000], 4)
@pytest.mark.not_vetted
class Test_ARIMA111CSS(CheckArimaResultsMixin, CheckForecastMixin,
                       CheckDynamicForecastMixin):
    # ARIMA(1, 1, 1) on CPI, conditional-sum-of-squares estimation.
    decimal_forecast = 2
    decimal_forecast_dyn = 2
    decimal_forecasterr = 3
    decimal_arroots = 3
    decimal_cov_params = 3
    decimal_hqic = 3
    decimal_maroots = 3
    decimal_t = 1
    decimal_fittedvalues = 2  # because of rounding when copying
    decimal_resid = 2
    decimal_predict_levels = DECIMAL_2
    @classmethod
    def setup_class(cls):
        cpi = datasets.macrodata.load_pandas().data['cpi'].values
        cls.res1 = ARIMA(cpi, (1, 1, 1)).fit(disp=-1, method='css')
        cls.res2 = results_arima.ARIMA111(method='css')
        # rebuild level fitted values from the reference linear predictions
        cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
        # make sure endog names changes to D.cpi
        (fc_res, fc_err, conf_int) = cls.res1.forecast(25)
        cls.res1.forecast_res = fc_res
        cls.res1.forecast_err = fc_err
        cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164 + 63,
                                                     typ='levels',
                                                     dynamic=True)
@pytest.mark.not_vetted
class Test_ARIMA112CSS(CheckArimaResultsMixin):
    # ARIMA(1, 1, 2) on CPI, conditional-sum-of-squares estimation with
    # explicit starting parameters.
    decimal_llf = 3
    decimal_aic = 3
    decimal_bic = 3
    decimal_arroots = 3
    decimal_maroots = 2
    decimal_t = 1
    decimal_resid = 2
    decimal_fittedvalues = 3
    decimal_predict_levels = DECIMAL_3
    @classmethod
    def setup_class(cls):
        cpi = datasets.macrodata.load_pandas().data['cpi'].values
        cls.res1 = ARIMA(cpi, (1, 1, 2)).fit(disp=-1, method='css',
                                             start_params=[.905322, -.692425,
                                                           1.07366, 0.172024])
        cls.res2 = results_arima.ARIMA112(method='css')
        # rebuild level fitted values from the reference linear predictions
        cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
        # make sure endog names changes to D.cpi
        #(cls.res1.forecast_res,
        # cls.res1.forecast_err,
        # conf_int) = cls.res1.forecast(25)
        #cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226,
        #                                             typ='levels',
        #                                             dynamic=True)
        # TODO: fix the indexing for the end here, I don't think this is right
        # if we're going to treat it like indexing
        # the forecast from 2005Q1 through 2009Q4 is indices
        # 184 through 227 not 226
        # note that the first one counts in the count so 164 + 64 is 65
        # predictions
        #cls.res1.forecast_res_dyn = self.predict(start=164, end=164+63,
        #                                         typ='levels', dynamic=True)
        # since we got from gretl don't have linear prediction in differences
    def test_freq(self):
        # expected root frequencies for this fit (regression values)
        assert_almost_equal(self.res1.arfreq, [0.5000], 4)
        assert_almost_equal(self.res1.mafreq, [0.5000, 0.5000], 4)
#class Test_ARIMADates(CheckArmaResults, CheckForecast, CheckDynamicForecast):
# @classmethod
# def setup_class(cls):
# cpi = datasets.macrodata.load_pandas().data['cpi'].values
# dates = pd.date_range('1959', periods=203, freq='Q')
# cls.res1 = ARIMA(cpi, dates=dates, freq='Q').fit(order=(1, 1, 1),
# disp=-1)
# cls.res2 = results_arima.ARIMA111()
# # make sure endog names changes to D.cpi
# cls.decimal_llf = 3
# cls.decimal_aic = 3
# cls.decimal_bic = 3
# (cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
@pytest.mark.not_vetted
@pytest.mark.slow
def test_start_params_bug():
    # Smoke test: fitting with start_ar_lags on this series used to fail.
    data = np.array([
        1368., 1187, 1090, 1439, 2362, 2783, 2869, 2512, 1804,
        1544, 1028, 869, 1737, 2055, 1947, 1618, 1196, 867, 997, 1862, 2525,
        3250, 4023, 4018, 3585, 3004, 2500, 2441, 2749, 2466, 2157, 1847,
        1463, 1146, 851, 993, 1448, 1719, 1709, 1455, 1950, 1763, 2075, 2343,
        3570, 4690, 3700, 2339, 1679, 1466, 998, 853, 835, 922, 851, 1125,
        1299, 1105, 860, 701, 689, 774, 582, 419, 846, 1132, 902, 1058, 1341,
        1551, 1167, 975, 786, 759, 751, 649, 876, 720, 498, 553, 459, 543,
        447, 415, 377, 373, 324, 320, 306, 259, 220, 342, 558, 825, 994,
        1267, 1473, 1601, 1896, 1890, 2012, 2198, 2393, 2825, 3411, 3406,
        2464, 2891, 3685, 3638, 3746, 3373, 3190, 2681, 2846, 4129, 5054,
        5002, 4801, 4934, 4903, 4713, 4745, 4736, 4622, 4642, 4478, 4510,
        4758, 4457, 4356, 4170, 4658, 4546, 4402, 4183, 3574, 2586, 3326,
        3948, 3983, 3997, 4422, 4496, 4276, 3467, 2753, 2582, 2921, 2768,
        2789, 2824, 2482, 2773, 3005, 3641, 3699, 3774, 3698, 3628, 3180,
        3306, 2841, 2014, 1910, 2560, 2980, 3012, 3210, 3457, 3158, 3344,
        3609, 3327, 2913, 2264, 2326, 2596, 2225, 1767, 1190, 792, 669,
        589, 496, 354, 246, 250, 323, 495, 924, 1536, 2081, 2660, 2814, 2992,
        3115, 2962, 2272, 2151, 1889, 1481, 955, 631, 288, 103, 60, 82, 107,
        185, 618, 1526, 2046, 2348, 2584, 2600, 2515, 2345, 2351, 2355,
        2409, 2449, 2645, 2918, 3187, 2888, 2610, 2740, 2526, 2383, 2936,
        2968, 2635, 2617, 2790, 3906, 4018, 4797, 4919, 4942, 4656, 4444,
        3898, 3908, 3678, 3605, 3186, 2139, 2002, 1559, 1235, 1183, 1096,
        673, 389, 223, 352, 308, 365, 525, 779, 894, 901, 1025, 1047, 981,
        902, 759, 569, 519, 408, 263, 156, 72, 49, 31, 41, 192, 423, 492,
        552, 564, 723, 921, 1525, 2768, 3531, 3824, 3835, 4294, 4533, 4173,
        4221, 4064, 4641, 4685, 4026, 4323, 4585, 4836, 4822, 4631, 4614,
        4326, 4790, 4736, 4104, 5099, 5154, 5121, 5384, 5274, 5225, 4899,
        5382, 5295, 5349, 4977, 4597, 4069, 3733, 3439, 3052, 2626, 1939,
        1064, 713, 916, 832, 658, 817, 921, 772, 764, 824, 967, 1127, 1153,
        824, 912, 957, 990, 1218, 1684, 2030, 2119, 2233, 2657, 2652, 2682,
        2498, 2429, 2346, 2298, 2129, 1829, 1816, 1225, 1010, 748, 627, 469,
        576, 532, 475, 582, 641, 605, 699, 680, 714, 670, 666, 636, 672,
        679, 446, 248, 134, 160, 178, 286, 413, 676, 1025, 1159, 952, 1398,
        1833, 2045, 2072, 1798, 1799, 1358, 727, 353, 347, 844, 1377, 1829,
        2118, 2272, 2745, 4263, 4314, 4530, 4354, 4645, 4547, 5391, 4855,
        4739, 4520, 4573, 4305, 4196, 3773, 3368, 2596, 2596, 2305, 2756,
        3747, 4078, 3415, 2369, 2210, 2316, 2263, 2672, 3571, 4131, 4167,
        4077, 3924, 3738, 3712, 3510, 3182, 3179, 2951, 2453, 2078, 1999,
        2486, 2581, 1891, 1997, 1366, 1294, 1536, 2794, 3211, 3242, 3406,
        3121, 2425, 2016, 1787, 1508, 1304, 1060, 1342, 1589, 2361, 3452,
        2659, 2857, 3255, 3322, 2852, 2964, 3132, 3033, 2931, 2636, 2818, 3310,
        3396, 3179, 3232, 3543, 3759, 3503, 3758, 3658, 3425, 3053, 2620, 1837,
        923, 712, 1054, 1376, 1556, 1498, 1523, 1088, 728, 890, 1413, 2524,
        3295, 4097, 3993, 4116, 3874, 4074, 4142, 3975, 3908, 3907, 3918, 3755,
        3648, 3778, 4293, 4385, 4360, 4352, 4528, 4365, 3846, 4098, 3860, 3230,
        2820, 2916, 3201, 3721, 3397, 3055, 2141, 1623, 1825, 1716, 2232, 2939,
        3735, 4838, 4560, 4307, 4975, 5173, 4859, 5268, 4992, 5100, 5070, 5270,
        4760, 5135, 5059, 4682, 4492, 4933, 4737, 4611, 4634, 4789, 4811, 4379,
        4689, 4284, 4191, 3313, 2770, 2543, 3105, 2967, 2420, 1996, 2247, 2564,
        2726, 3021, 3427, 3509, 3759, 3324, 2988, 2849, 2340, 2443, 2364, 1252,
        623, 742, 867, 684, 488, 348, 241, 187, 279, 355, 423, 678, 1375, 1497,
        1434, 2116, 2411, 1929, 1628, 1635, 1609, 1757, 2090, 2085, 1790, 1846,
        2038, 2360, 2342, 2401, 2920, 3030, 3132, 4385, 5483, 5865, 5595, 5485,
        5727, 5553, 5560, 5233, 5478, 5159, 5155, 5312, 5079, 4510, 4628, 4535,
        3656, 3698, 3443, 3146, 2562, 2304, 2181, 2293, 1950, 1930, 2197, 2796,
        3441, 3649, 3815, 2850, 4005, 5305, 5550, 5641, 4717, 5131, 2831, 3518,
        3354, 3115, 3515, 3552, 3244, 3658, 4407, 4935, 4299, 3166, 3335, 2728,
        2488, 2573, 2002, 1717, 1645, 1977, 2049, 2125, 2376, 2551, 2578, 2629,
        2750, 3150, 3699, 4062, 3959, 3264, 2671, 2205, 2128, 2133, 2095, 1964,
        2006, 2074, 2201, 2506, 2449, 2465, 2064, 1446, 1382, 983, 898, 489,
        319, 383, 332, 276, 224, 144, 101, 232, 429, 597, 750, 908, 960, 1076,
        951, 1062, 1183, 1404, 1391, 1419, 1497, 1267, 963, 682, 777, 906,
        1149, 1439, 1600, 1876, 1885, 1962, 2280, 2711, 2591, 2411])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # this call used to raise before the fix; it should run cleanly now
        ARMA(data, order=(4, 1)).fit(start_ar_lags=5, disp=-1)
@pytest.mark.not_vetted
def test_arima_predict_mle_dates():
    # Date-based in-sample and out-of-sample prediction for an exact-MLE
    # ARIMA(4, 1, 1), checked against results_arima_forecasts_all_mle.csv.
    cpi = datasets.macrodata.load_pandas().data['cpi'].values
    res1 = ARIMA(cpi, (4, 1, 1), dates=cpi_dates, freq='Q').fit(disp=-1)
    path = os.path.join(current_path, 'results',
                        'results_arima_forecasts_all_mle.csv')
    arima_forecasts = pd.read_csv(path).values
    fc = arima_forecasts[:, 0]
    fcdyn = arima_forecasts[:, 1]
    fcdyn2 = arima_forecasts[:, 2]
    # in-sample span
    start, end = 2, 51
    fv = res1.predict('1959Q3', '1971Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
    tm.assert_index_equal(res1.data.predict_dates,
                          cpi_dates[start:end + 1])
    # out-of-sample span
    start, end = 202, 227
    fv = res1.predict('2009Q3', '2015Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
    tm.assert_index_equal(res1.data.predict_dates,
                          cpi_predict_dates)
    # make sure dynamic works
    start, end = '1960q2', '1971q4'
    fv = res1.predict(start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[5:51 + 1], DECIMAL_4)
    start, end = '1965q1', '2015q4'
    fv = res1.predict(start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[24:227 + 1], DECIMAL_4)
@pytest.mark.not_vetted
def test_arma_predict_mle_dates():
    """Prediction-index handling for an annual ARMA(9, 0) under MLE."""
    series = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
    model = ARMA(series, (9, 0), dates=sun_dates, freq='A')
    model.method = 'mle'

    # this start/end combination is rejected
    with pytest.raises(ValueError):
        model._get_prediction_index('1701', '1751', True)

    # in-sample span: predict dates are a slice of the model's dates
    model._get_prediction_index('1702', '1751', False)
    tm.assert_index_equal(model.data.predict_dates, sun_dates[2:51 + 1])

    # out-of-sample span uses the precomputed prediction dates
    model._get_prediction_index('2008', '2033', False)
    tm.assert_index_equal(model.data.predict_dates, sun_predict_dates)
@pytest.mark.not_vetted
def test_arima_predict_css_dates():
    # Date-based in-sample and out-of-sample prediction for a CSS fit,
    # checked against results_arima_forecasts_all_css.csv.
    cpi = datasets.macrodata.load_pandas().data['cpi'].values
    res1 = ARIMA(cpi, (4, 1, 1), dates=cpi_dates, freq='Q').fit(disp=-1,
                                                                method='css',
                                                                trend='nc')
    # fixed parameter vector passed to model.predict (not the fitted params)
    params = np.array([1.231272508473910,
                       -0.282516097759915,
                       0.170052755782440,
                       -0.118203728504945,
                       -0.938783134717947])
    path = os.path.join(current_path, 'results',
                        'results_arima_forecasts_all_css.csv')
    arima_forecasts = pd.read_csv(path).values
    fc = arima_forecasts[:, 0]
    fcdyn = arima_forecasts[:, 1]
    fcdyn2 = arima_forecasts[:, 2]
    start, end = 5, 51
    fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
    tm.assert_index_equal(res1.data.predict_dates, cpi_dates[start:end + 1])
    start, end = 202, 227
    fv = res1.model.predict(params, '2009Q3', '2015Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
    tm.assert_index_equal(res1.data.predict_dates, cpi_predict_dates)
    # make sure dynamic works
    start, end = 5, 51
    fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels',
                            dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end + 1], DECIMAL_4)
    start, end = '1965q1', '2015q4'
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[24:227 + 1], DECIMAL_4)
@pytest.mark.not_vetted
def test_arma_predict_css_dates():
    """A CSS-method ARMA rejects this prediction start/end combination."""
    # TODO: GH reference?
    series = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
    model = ARMA(series, (9, 0), dates=sun_dates, freq='A')
    model.method = 'css'
    with pytest.raises(ValueError):
        model._get_prediction_index('1701', '1751', False)
def test_arima_wrapper():
    # test that names get attached to res.params correctly
    # TODO: GH reference?
    cpi = datasets.macrodata.load_pandas().data['cpi']
    cpi.index = pd.Index(cpi_dates)
    res = ARIMA(cpi, (4, 1, 1), freq='Q').fit(disp=-1)
    expected_index = pd.Index(['const', 'ar.L1.D.cpi', 'ar.L2.D.cpi',
                               'ar.L3.D.cpi', 'ar.L4.D.cpi',
                               'ma.L1.D.cpi'])
    # assert_index_equal subsumes the former bare `equals` assertion and
    # gives an informative diff on failure, so only one check is needed.
    tm.assert_index_equal(res.params.index, expected_index)
    # the endog name should reflect the differencing
    assert res.model.endog_names == 'D.cpi'
@pytest.mark.not_vetted
@pytest.mark.smoke
def test_1dexog():
    # smoke test, this will raise an error if broken
    dta = datasets.macrodata.load_pandas().data
    endog = dta['realcons'].values
    exog = dta['m1'].values.squeeze()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mod = ARMA(endog, (1, 1), exog).fit(disp=-1)
        mod.predict(193, 203, exog[-10:])
        # check for dynamic is true and pandas Series see GH#2589
        mod.predict(193, 202, exog[-10:], dynamic=True)
        # repeat with pandas inputs and a date index
        dta.index = pd.Index(cpi_dates)
        mod = ARMA(dta['realcons'], (1, 1), dta['m1'])
        res = mod.fit(disp=-1)
        res.predict(dta.index[-10], dta.index[-1],
                    exog=dta['m1'][-10:], dynamic=True)
        # same, without a trend term
        mod = ARMA(dta['realcons'], (1, 1), dta['m1'])
        res = mod.fit(trend='nc', disp=-1)
        res.predict(dta.index[-10], dta.index[-1],
                    exog=dta['m1'][-10:], dynamic=True)
@pytest.mark.not_vetted
def test_arima_predict_bug():
    # predict_start_date wasn't getting set on start = None
    # TODO: GH reference?
    dta = datasets.sunspots.load_pandas().data['SUNACTIVITY']
    dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')[:309]
    arma_mod20 = ARMA(dta, (2, 0)).fit(disp=-1)
    # should not raise with both endpoints defaulted
    arma_mod20.predict(None, None)
    # test prediction with time stamp, see GH#2587
    predict = arma_mod20.predict(dta.index[-20], dta.index[-1])
    assert predict.index.equals(dta.index[-20:])
    predict = arma_mod20.predict(dta.index[-20], dta.index[-1], dynamic=True)
    assert predict.index.equals(dta.index[-20:])
    # partially out of sample
    predict_dates = pd.DatetimeIndex(start='2000', end='2015', freq='A')
    predict = arma_mod20.predict(predict_dates[0], predict_dates[-1])
    assert predict.index.equals(predict_dates)
@pytest.mark.not_vetted
def test_arima_predict_q2():
    # bug with q > 1 for arima predict
    # TODO: GH reference?
    inv = datasets.macrodata.load().data['realinv']
    arima_mod = ARIMA(np.log(inv), (1, 1, 2)).fit(start_params=[0, 0, 0, 0],
                                                  disp=-1)
    fc, stderr, conf_int = arima_mod.forecast(5)
    # values copy-pasted from gretl
    assert_almost_equal(fc,
                        [7.306320, 7.313825, 7.321749, 7.329827, 7.337962],
                        5)
@pytest.mark.not_vetted
def test_arima_predict_pandas_nofreq():
    # GH#712
    # Prediction indexing when the DatetimeIndex has no inferable frequency.
    dates = ["2010-01-04", "2010-01-05", "2010-01-06", "2010-01-07",
             "2010-01-08", "2010-01-11", "2010-01-12", "2010-01-11",
             "2010-01-12", "2010-01-13", "2010-01-17"]
    close = [626.75, 623.99, 608.26, 594.1, 602.02, 601.11, 590.48, 587.09,
             589.85, 580.0, 587.62]
    data = pd.DataFrame(close, index=pd.DatetimeIndex(dates),
                        columns=["close"])
    # TODO: fix this names bug for non-string names
    arma = ARMA(data, order=(1, 0)).fit(disp=-1)
    # first check that in-sample prediction works
    predict = arma.predict()
    assert predict.index.equals(data.index)
    # check that this raises an exception when date not on index
    with pytest.raises(KeyError):
        arma.predict(start="2010-1-9", end=10)
    with pytest.raises(KeyError):
        arma.predict(start="2010-1-9", end="2010-1-17")
    # raise because end not on index
    with pytest.raises(KeyError):
        arma.predict(start="2010-1-4", end="2010-1-10")
    # raise because end not on index
    with pytest.raises(KeyError):
        arma.predict(start=3, end="2010-1-10")
    predict = arma.predict(start="2010-1-7", end=10)  # should be of length 10
    assert len(predict) == 8
    assert predict.index.equals(data.index[3:10 + 1])
    # out-of-sample end falls back to an integer index
    predict = arma.predict(start="2010-1-7", end=14)
    assert predict.index.equals(pd.Index(range(3, 15)))
    predict = arma.predict(start=3, end=14)
    assert predict.index.equals(pd.Index(range(3, 15)))
    # end can be a date if it's in the sample and on the index
    # predict dates is just a slice of the dates index then
    predict = arma.predict(start="2010-1-6", end="2010-1-13")
    assert predict.index.equals(data.index[2:10])
    predict = arma.predict(start=2, end="2010-1-13")
    assert predict.index.equals(data.index[2:10])
@pytest.mark.not_vetted
def test_arima_predict_exog():
    # check GH#625 and GH#626
    # ARMAX prediction with exog, compared against stored gretl forecasts.
    # Note: upstream there is a bunch of commented-out code after this point;
    # I have not been able to get an explanation as to if/why it is worth
    # keeping.
    # TODO: At some point check back to see if it has been addressed.
    path = os.path.join(current_path, 'results',
                        'results_arima_exog_forecasts_mle.csv')
    arima_forecasts = pd.read_csv(path)
    y = arima_forecasts["y"].dropna()
    X = np.arange(len(y) + 25) / 20.
    predict_expected = arima_forecasts["predict"]
    arma_res = ARMA(y.values, order=(2, 1), exog=X[:100]).fit(trend="c",
                                                              disp=-1)
    # params from gretl
    params = np.array([2.786912485145725, -0.122650190196475,
                       0.533223846028938, -0.319344321763337,
                       0.132883233000064])
    assert_almost_equal(arma_res.params, params, 5)
    # no exog for in-sample
    predict = arma_res.predict()
    assert_almost_equal(predict, predict_expected.values[:100], 5)
    # check GH#626
    assert len(arma_res.model.exog_names) == 5
    # exog for out-of-sample and in-sample dynamic
    predict = arma_res.model.predict(params, end=124, exog=X[100:])
    assert_almost_equal(predict, predict_expected.values, 6)
    # conditional sum of squares
    #arima_forecasts = pd.read_csv(current_path + "/results/"
    #                              "results_arima_exog_forecasts_css.csv")
    #predict_expected = arima_forecasts["predict"].dropna()
    #arma_res = ARMA(y.values, order=(2, 1), exog=X[:100]).fit(trend="c",
    #                                                          method="css",
    #                                                          disp=-1)
    #params = np.array([2.152350033809826, -0.103602399018814,
    #                   0.566716580421188, -0.326208009247944,
    #                   0.102142932143421])
    #predict = arma_res.model.predict(params)
    # in-sample
    #assert_almost_equal(predict, predict_expected.values[:98], 6)
    #predict = arma_res.model.predict(params, end=124, exog=X[100:])
    # exog for out-of-sample and in-sample dynamic
    #assert_almost_equal(predict, predict_expected.values, 3)
@pytest.mark.not_vetted
def test_arimax():
    # ARIMAX estimation with two exog regressors, compared to Stata values.
    dta = datasets.macrodata.load_pandas().data
    dta.index = cpi_dates
    dta = dta[["realdpi", "m1", "realgdp"]]
    y = dta.pop("realdpi")
    # 1 exog
    #X = dta.iloc[1:]["m1"]
    #res = ARIMA(y, (2, 1, 1), X).fit(disp=-1)
    #params = [23.902305009084373, 0.024650911502790, -0.162140641341602,
    #          0.165262136028113, -0.066667022903974]
    #assert_almost_equal(res.params.values, params, 6)
    # 2 exog
    X = dta
    res = ARIMA(y, (2, 1, 1), X).fit(disp=False, solver="nm", maxiter=1000,
                                     ftol=1e-12, xtol=1e-12)
    # from gretl; we use the versions from stata below instead
    # params = [13.113976653926638, -0.003792125069387, 0.004123504809217,
    #           -0.199213760940898, 0.151563643588008, -0.033088661096699]
    # from stata using double
    stata_llf = -1076.108614859121
    params = [13.1259220104, -0.00376814509403812, 0.00411970083135622,
              -0.19921477896158524, 0.15154396192855729, -0.03308400760360837]
    # we can get close
    assert_almost_equal(res.params.values, params, 4)
    # This shows that it's an optimizer problem and not a problem in the code
    assert_almost_equal(res.model.loglike(np.array(params)), stata_llf, 6)
    # repeat with differenced exog
    X = dta.diff()
    X.iloc[0] = 0
    res = ARIMA(y, (2, 1, 1), X).fit(disp=False)
    # gretl won't estimate this - looks like maybe a bug on their part,
    # but we can just fine, we're close to Stata's answer
    # from Stata
    params = [19.5656863783347, 0.32653841355833396198,
              0.36286527042965188716, -1.01133792126884,
              -0.15722368379307766206, 0.69359822544092153418]
    assert_almost_equal(res.params.values, params, 3)
@pytest.mark.not_vetted
def test_bad_start_params():
    # Fitting should raise ValueError rather than fail obscurely when the
    # default starting parameters are unusable for these models.
    # TODO: what is bad about these params??
    # TODO: GH reference?
    endog = np.array([
        820.69093, 781.0103028, 785.8786988, 767.64282267,
        778.9837648, 824.6595702, 813.01877867, 751.65598567,
        753.431091, 746.920813, 795.6201904, 772.65732833,
        793.4486454, 868.8457766, 823.07226547, 783.09067747,
        791.50723847, 770.93086347, 835.34157333, 810.64147947,
        738.36071367, 776.49038513, 822.93272333, 815.26461227,
        773.70552987, 777.3726522, 811.83444853, 840.95489133,
        777.51031933, 745.90077307, 806.95113093, 805.77521973,
        756.70927733, 749.89091773, 1694.2266924, 2398.4802244,
        1434.6728516, 909.73940427, 929.01291907, 769.07561453,
        801.1112548, 796.16163313, 817.2496376, 857.73046447,
        838.849345, 761.92338873, 731.7842242, 770.4641844])
    mod = ARMA(endog, (15, 0))
    with pytest.raises(ValueError):
        mod.fit()
    inv = datasets.macrodata.load().data['realinv']
    arima_mod = ARIMA(np.log(inv), (1, 1, 2))
    with pytest.raises(ValueError):
        # TODO: Upstream this incorrectly re-tries `mod.fit()`
        arima_mod.fit()
@pytest.mark.not_vetted
def test_armax_predict_no_trend():
    # GH#1123 test ARMAX predict doesn't ignore exog when trend is none
    arparams = np.array([.75, -.25])
    maparams = np.array([.65, .35])
    nobs = 20
    np.random.seed(12345)
    y = arma_generate_sample(arparams, maparams, nobs)
    X = np.random.randn(nobs)
    y += 5 * X
    # AR(1) with exog, no trend
    mod = ARMA(y[:-1], order=(1, 0), exog=X[:-1])
    res = mod.fit(trend='nc', disp=False)
    fc = res.forecast(exog=X[-1:])
    # results from gretl
    assert_almost_equal(fc[0], 2.200393, 6)
    assert_almost_equal(fc[1], 1.030743, 6)
    assert_almost_equal(fc[2][0, 0], 0.180175, 6)
    assert_almost_equal(fc[2][0, 1], 4.220611, 6)
    # ARMA(1, 1) with exog, no trend
    mod = ARMA(y[:-1], order=(1, 1), exog=X[:-1])
    res = mod.fit(trend='nc', disp=False)
    fc = res.forecast(exog=X[-1:])
    assert_almost_equal(fc[0], 2.765688, 6)
    assert_almost_equal(fc[1], 0.835048, 6)
    assert_almost_equal(fc[2][0, 0], 1.129023, 6)
    assert_almost_equal(fc[2][0, 1], 4.402353, 6)
    # make sure this works too. code looked fishy.
    mod = ARMA(y[:-1], order=(1, 0), exog=X[:-1])
    res = mod.fit(trend='c', disp=False)
    fc = res.forecast(exog=X[-1:])
    assert_almost_equal(fc[0], 2.481219, 6)
    assert_almost_equal(fc[1], 0.968759, 6)
    assert_almost_equal(fc[2][0], [0.582485, 4.379952], 6)
@pytest.mark.not_vetted
def test_small_data():
    # GH#1146
    # Behavior when the sample is too small for the requested order.
    y = [-1214.360173, -1848.209905, -2100.918158, -3647.483678, -4711.186773]
    # refuse to estimate these
    with pytest.raises(ValueError):
        ARIMA(y, (2, 0, 3))
    with pytest.raises(ValueError):
        ARIMA(y, (1, 1, 3))
    mod = ARIMA(y, (1, 0, 3))
    with pytest.raises(ValueError):
        mod.fit(trend="c")
    # TODO: mark these as smoke?
    # try to estimate these...leave it up to the user to check for garbage
    # and be clear, these are garbage parameters.
    # X-12 arima will estimate, gretl refuses to estimate likely a problem
    # in start params regression.
    mod.fit(trend="nc", disp=0, start_params=[.1, .1, .1, .1])
    mod = ARIMA(y, (1, 0, 2))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
@pytest.mark.not_vetted
class TestARMA00(object):
    # Degenerate ARMA(0, 0): with a constant trend the fit reduces to the
    # sample mean, so the derived quantities have closed forms to check.
    @classmethod
    def setup_class(cls):
        sunspots = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
        cls.y = y = sunspots
        cls.arma_00_model = ARMA(y, order=(0, 0))
        cls.arma_00_res = cls.arma_00_model.fit(disp=-1)

    def test_parameters(self):
        # the single estimated parameter is the sample mean
        params = self.arma_00_res.params
        assert_almost_equal(self.y.mean(), params)

    def test_predictions(self):
        # every in-sample prediction equals the sample mean
        predictions = self.arma_00_res.predict()
        assert_almost_equal(self.y.mean() * np.ones_like(predictions),
                            predictions)

    def test_arroots(self):  # TODO: Belongs in test_wold?
        # GH#4559
        # regression test; older implementation of arroots returned None
        # instead of an empty array
        roots = self.arma_00_res.arroots
        assert roots.size == 0

    def test_maroots(self):  # TODO: Belongs in test_wold?
        # GH#4559
        # regression test; older implementation of maroots returned None
        # instead of an empty array
        roots = self.arma_00_res.maroots
        assert roots.size == 0

    @pytest.mark.skip(reason=' This test is invalid since the ICs differ due '
                             'to df_model differences between OLS and ARIMA')
    def test_information_criteria(self):
        # This test is invalid since the ICs differ due to df_model differences
        # between OLS and ARIMA
        res = self.arma_00_res
        y = self.y
        # NOTE: `disp` is not an OLS.fit argument (it is an ARMA/ARIMA fit
        # option), so it was dropped from this call.
        ols_res = OLS(y, np.ones_like(y)).fit()
        ols_ic = np.array([ols_res.aic, ols_res.bic])
        arma_ic = np.array([res.aic, res.bic])
        assert_almost_equal(ols_ic, arma_ic, DECIMAL_4)

    def test_arma_00_nc(self):
        # an ARMA(0, 0) with no trend has nothing to estimate
        arma_00 = ARMA(self.y, order=(0, 0))
        with pytest.raises(ValueError):
            arma_00.fit(trend='nc', disp=-1)

    def test_css(self):
        # CSS estimation also reduces to the sample mean
        arma = ARMA(self.y, order=(0, 0))
        fit = arma.fit(method='css', disp=-1)
        predictions = fit.predict()
        assert_almost_equal(self.y.mean() * np.ones_like(predictions),
                            predictions)

    def test_arima(self):
        # ARIMA(0, 1, 0) on a cumulative sum estimates the mean difference
        yi = np.cumsum(self.y)
        arima = ARIMA(yi, order=(0, 1, 0))
        fit = arima.fit(disp=-1)
        assert_almost_equal(np.diff(yi).mean(),
                            fit.params,
                            DECIMAL_4)

    def test_arma_ols(self):
        # ARMA(0, 0) with an exog regressor and constant matches OLS
        y = self.y
        y_lead = y[1:]
        y_lag = y[:-1]
        T = y_lag.shape[0]
        X = np.hstack((np.ones((T, 1)), y_lag[:, None]))
        ols_res = OLS(y_lead, X).fit()
        arma_res = ARMA(y_lead, order=(0, 0), exog=y_lag).fit(trend='c',
                                                              disp=-1)
        assert_almost_equal(ols_res.params, arma_res.params)

    def test_arma_exog_no_constant(self):
        # same as test_arma_ols but without the constant term
        y = self.y
        y_lead = y[1:]
        y_lag = y[:-1]
        X = y_lag[:, None]
        ols_res = OLS(y_lead, X).fit()
        arma_res = ARMA(y_lead, order=(0, 0), exog=y_lag).fit(trend='nc',
                                                              disp=-1)
        assert_almost_equal(ols_res.params, arma_res.params)
@pytest.mark.not_vetted
def test_arima_dates_startatend():
    # Prediction starting exactly at the end of the sample.
    # TODO: GH reference?
    np.random.seed(18)
    x = pd.Series(np.random.random(36),
                  index=pd.DatetimeIndex(start='1/1/1990',
                                         periods=36, freq='M'))
    res = ARIMA(x, (1, 0, 0)).fit(disp=0)
    pred = res.predict(start=len(x), end=len(x))
    # the one-step-ahead prediction is dated one period past the sample
    assert pred.index[0] == x.index.shift(1)[-1]
    fc = res.forecast()[0]
    assert_almost_equal(pred.values[0], fc)
@pytest.mark.not_vetted
def test_arima_diff2():
    # Forecasts and level predictions for a twice-differenced ARIMA(3, 2, 1),
    # compared against gretl output.
    dta = datasets.macrodata.load_pandas().data['cpi']
    dta.index = cpi_dates
    mod = ARIMA(dta, (3, 2, 1)).fit(disp=-1)
    fc, fcerr, conf_int = mod.forecast(10)
    # forecasts from gretl
    conf_int_res = [(216.139, 219.231),
                    (216.472, 221.520),
                    (217.064, 223.649),
                    (217.586, 225.727),
                    (218.119, 227.770),
                    (218.703, 229.784),
                    (219.306, 231.777),
                    (219.924, 233.759),
                    (220.559, 235.735),
                    (221.206, 237.709)]
    fc_res = [217.685, 218.996, 220.356, 221.656, 222.945,
              224.243, 225.541, 226.841, 228.147, 229.457]
    fcerr_res = [0.7888, 1.2878, 1.6798, 2.0768, 2.4620,
                 2.8269, 3.1816, 3.52950, 3.8715, 4.2099]
    assert_almost_equal(fc, fc_res, 3)
    assert_almost_equal(fcerr, fcerr_res, 3)
    assert_almost_equal(conf_int, conf_int_res, 3)
    predicted = mod.predict('2008Q1', '2012Q1', typ='levels')
    predicted_res = [214.464, 215.478, 221.277, 217.453, 212.419, 213.530,
                     215.087, 217.685, 218.996, 220.356, 221.656, 222.945,
                     224.243, 225.541, 226.841, 228.147, 229.457]
    assert_almost_equal(predicted, predicted_res, 3)
@pytest.mark.not_vetted
def test_arima111_predict_exog_2127():
    # regression test for issue GH#2127
    # out-of-sample prediction with exog for an ARIMA(1, 1, 1)
    ef = [0.03005, 0.03917, 0.02828, 0.03644, 0.03379, 0.02744,
          0.03343, 0.02621, 0.03050, 0.02455, 0.03261, 0.03507,
          0.02734, 0.05373, 0.02677, 0.03443, 0.03331, 0.02741,
          0.03709, 0.02113, 0.03343, 0.02011, 0.03675, 0.03077,
          0.02201, 0.04844, 0.05518, 0.03765, 0.05433, 0.03049,
          0.04829, 0.02936, 0.04421, 0.02457, 0.04007, 0.03009,
          0.04504, 0.05041, 0.03651, 0.02719, 0.04383, 0.02887,
          0.03440, 0.03348, 0.02364, 0.03496, 0.02549, 0.03284,
          0.03523, 0.02579, 0.03080, 0.01784, 0.03237, 0.02078,
          0.03508, 0.03062, 0.02006, 0.02341, 0.02223, 0.03145,
          0.03081, 0.02520, 0.02683, 0.01720, 0.02225, 0.01579,
          0.02237, 0.02295, 0.01830, 0.02356, 0.02051, 0.02932,
          0.03025, 0.02390, 0.02635, 0.01863, 0.02994, 0.01762,
          0.02837, 0.02421, 0.01951, 0.02149, 0.02079, 0.02528,
          0.02575, 0.01634, 0.02563, 0.01719, 0.02915, 0.01724,
          0.02804, 0.02750, 0.02099, 0.02522, 0.02422, 0.03254,
          0.02095, 0.03241, 0.01867, 0.03998, 0.02212, 0.03034,
          0.03419, 0.01866, 0.02623, 0.02052]
    ue = [4.9, 5.0, 5.0, 5.0, 4.9, 4.7, 4.8, 4.7, 4.7,
          4.6, 4.6, 4.7, 4.7, 4.5, 4.4, 4.5, 4.4, 4.6,
          4.5, 4.4, 4.5, 4.4, 4.6, 4.7, 4.6, 4.7, 4.7,
          4.7, 5.0, 5.0, 4.9, 5.1, 5.0, 5.4, 5.6, 5.8,
          6.1, 6.1, 6.5, 6.8, 7.3, 7.8, 8.3, 8.7, 9.0,
          9.4, 9.5, 9.5, 9.6, 9.8, 10., 9.9, 9.9, 9.7,
          9.8, 9.9, 9.9, 9.6, 9.4, 9.5, 9.5, 9.5, 9.5,
          9.8, 9.4, 9.1, 9.0, 9.0, 9.1, 9.0, 9.1, 9.0,
          9.0, 9.0, 8.8, 8.6, 8.5, 8.2, 8.3, 8.2, 8.2,
          8.2, 8.2, 8.2, 8.1, 7.8, 7.8, 7.8, 7.9, 7.9,
          7.7, 7.5, 7.5, 7.5, 7.5, 7.3, 7.2, 7.2, 7.2,
          7.0, 6.7, 6.6, 6.7, 6.7, 6.3, 6.3]
    ue = np.array(ue) / 100
    model = ARIMA(ef, (1, 1, 1), exog=ue)
    res = model.fit(transparams=False, pgtol=1e-8, iprint=0, disp=0)
    # optimizer must report successful convergence
    assert res.mle_retvals['warnflag'] == 0
    predicts = res.predict(start=len(ef), end=len(ef) + 10,
                           exog=ue[-11:], typ='levels')
    # regression test, not verified numbers
    predicts_res = np.array([
        0.02591095, 0.02321325, 0.02436579, 0.02368759, 0.02389753,
        0.02372, 0.0237481, 0.0236738, 0.023644, 0.0236283,
        0.02362267])
    assert_allclose(predicts, predicts_res, atol=5e-6)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
import scipy.signal
from ..signal import (signal_zerocrossings,
signal_resample,
signal_detrend,
signal_smooth,
signal_filter,
signal_findpeaks,
signal_formatpeaks)
from .ecg_peaks import ecg_peaks
from .ecg_segment import ecg_segment
from ..epochs import epochs_create
from ..epochs import epochs_to_df
from ..events import events_plot
from ..stats import standardize
def ecg_delineate(ecg_cleaned, rpeaks=None, sampling_rate=1000, method="peak", show=False, show_type='peaks', check=False):
    """Delineate the waves of an ECG (P wave, QRS complex, T wave).

    - **Cardiac Cycle**: A typical ECG heartbeat consists of a P wave, a QRS
      complex and a T wave. The P wave represents atrial depolarization, the
      QRS complex the rapid depolarization of the ventricles (hence its large
      amplitude), and the T wave ventricular repolarization.

    Parameters
    ----------
    ecg_cleaned : list, array, Series, DataFrame or dict
        The cleaned ECG channel as returned by `ecg_clean()`. A DataFrame or
        a dict of epochs is accepted as long as it contains an 'ECG_Clean'
        column.
    rpeaks : list, array, Series or dict
        The samples at which R-peaks occur, or the info dictionary returned
        by `ecg_findpeaks()` (key "ECG_R_Peaks"). Detected automatically via
        `ecg_peaks()` when None.
    sampling_rate : int
        The sampling frequency of `ecg_cleaned` (in Hz, i.e., samples/second).
        Defaults to 1000.
    method : str
        Can be one of 'peak' (default) for a peak-based method, 'cwt' for
        continuous wavelet transform or 'dwt' for discrete wavelet transform.
    show : bool
        If True, plots the delineated waves information.
    show_type : str
        The type of delineated-waves information shown in the plot.
    check : bool
        If True, post-process the waves with `_ecg_delineate_check()`.

    Returns
    -------
    signals : DataFrame
        A DataFrame of same length as the input signal in which occurrences
        of peaks, onsets and offsets are marked as "1" in a list of zeros.
    waves : dict
        For the peak method: samples of P-, Q-, S-, T-peaks, P-onsets and
        T-offsets ("ECG_P_Peaks", "ECG_Q_Peaks", "ECG_S_Peaks",
        "ECG_T_Peaks", "ECG_P_Onsets", "ECG_T_Offsets").
        For the wavelet methods: "ECG_P_Peaks", "ECG_T_Peaks",
        "ECG_P_Onsets", "ECG_P_Offsets", "ECG_T_Onsets", "ECG_T_Offsets",
        "ECG_R_Onsets", "ECG_R_Offsets".

    See Also
    --------
    ecg_clean, ecg_fixpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot

    Examples
    --------
    >>> import neurokit2 as nk
    >>>
    >>> ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
    >>> cleaned = nk.ecg_clean(ecg, sampling_rate=1000)
    >>> _, rpeaks = nk.ecg_peaks(cleaned)
    >>> signals, waves = nk.ecg_delineate(cleaned, rpeaks, sampling_rate=1000, method="peak")
    >>> nk.events_plot(waves["ECG_P_Peaks"], cleaned)
    >>> nk.events_plot(waves["ECG_T_Peaks"], cleaned)

    References
    --------------
    - Martinez et al. (2004). A wavelet-based ECG delineator: evaluation on
      standard databases. IEEE Transactions on Biomedical Engineering,
      51(4), 570-581.
    """
    # Sanitize input for ecg_cleaned: accept a DataFrame (single recording)
    # or a dict of epochs, and extract the first 'ECG_Clean' column found.
    if isinstance(ecg_cleaned, pd.DataFrame):
        cols = [col for col in ecg_cleaned.columns if 'ECG_Clean' in col]
        if len(cols) == 0:
            raise ValueError("NeuroKit error: ecg_delineate(): Wrong input,"
                             "we couldn't extract cleaned signal.")
        else:
            ecg_cleaned = ecg_cleaned[cols[0]].values
    elif isinstance(ecg_cleaned, dict):
        for i in ecg_cleaned:
            cols = [col for col in ecg_cleaned[i].columns if 'ECG_Clean' in col]
            if len(cols) == 0:
                raise ValueError("NeuroKit error: ecg_delineate(): Wrong input,"
                                 "we couldn't extract cleaned signal.")
            else:
                # Flatten the epochs into one long signal.
                signals = epochs_to_df(ecg_cleaned)
                ecg_cleaned = signals[cols[0]].values
    # Sanitize input for rpeaks: detect them if absent, unwrap info dicts.
    if rpeaks is None:
        _, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate)
        rpeaks = rpeaks["ECG_R_Peaks"]
    if isinstance(rpeaks, dict):
        rpeaks = rpeaks["ECG_R_Peaks"]
    method = method.lower()  # remove capitalised letters
    # Dispatch to the selected delineation backend.
    # NOTE(review): _ecg_delineator_peak is defined elsewhere in this
    # module (not visible here).
    if method in ["peak", "peaks", "derivative", "gradient"]:
        waves = _ecg_delineator_peak(ecg_cleaned,
                                     rpeaks=rpeaks,
                                     sampling_rate=sampling_rate)
    elif method in ["cwt", "continuous wavelet transform"]:
        waves = _ecg_delinator_cwt(ecg_cleaned,
                                   rpeaks=rpeaks,
                                   sampling_rate=sampling_rate)
    elif method in ["dwt", "discrete wavelet transform"]:
        waves = _dwt_ecg_delinator(ecg_cleaned,
                                   rpeaks,
                                   sampling_rate=sampling_rate)
    else:
        raise ValueError("NeuroKit error: ecg_delineate(): 'method' should be "
                         "one of 'peak', 'cwt' or 'dwt'.")
    # Remove NaN in Peaks, Onsets, and Offsets before building the marker
    # DataFrame (signal_formatpeaks needs integer sample indices).
    waves_noNA = waves.copy()
    for feature in waves_noNA.keys():
        waves_noNA[feature] = [int(x) for x in waves_noNA[feature] if ~np.isnan(x)]
    instant_peaks = signal_formatpeaks(waves_noNA,
                                       desired_length=len(ecg_cleaned))
    signals = instant_peaks
    if show is True:
        _ecg_delineate_plot(ecg_cleaned, rpeaks=rpeaks, signals=signals, signal_features_type=show_type, sampling_rate=sampling_rate)
    if check is True:
        waves = _ecg_delineate_check(waves, rpeaks)
    return signals, waves
# =============================================================================
# WAVELET METHOD (DWT)
# =============================================================================
def _dwt_resample_points(peaks, sampling_rate, desired_sampling_rate):
"""Resample given points to a different sampling rate."""
peaks_resample = (np.array(peaks) * desired_sampling_rate / sampling_rate)
peaks_resample = [np.nan if np.isnan(x) else int(x) for x in peaks_resample.tolist()]
return peaks_resample
def _dwt_ecg_delinator(ecg, rpeaks, sampling_rate, analysis_sampling_rate=2000):
    """Delineate an ECG signal using discrete wavelet transforms.

    The signal is resampled to `analysis_sampling_rate`, delineated there,
    and every detected landmark is mapped back to the original rate.

    Args:
        ecg: the (cleaned) ECG signal.
        rpeaks: sample indices of the R-peaks in `ecg`.
        sampling_rate: sampling rate of the input signal (Hz).
        analysis_sampling_rate: internal sampling rate used for analysis (Hz).

    Returns:
        Dictionary mapping landmark names ("ECG_T_Peaks", "ECG_P_Onsets",
        ...) to lists of sample indices at the original sampling rate
        (np.nan where a landmark could not be found).
    """
    ecg = signal_resample(ecg, sampling_rate=sampling_rate, desired_sampling_rate=analysis_sampling_rate)
    dwtmatr = _dwt_compute_multiscales(ecg, 9)
    # # only for debugging
    # for idx in [0, 1, 2, 3]:
    #     plt.plot(dwtmatr[idx + 3], label=f'W[{idx}]')
    # plt.plot(ecg, '--')
    # plt.legend()
    # plt.grid(True)
    # plt.show()
    rpeaks_resampled = _dwt_resample_points(rpeaks, sampling_rate, analysis_sampling_rate)
    # T/P peaks first; the QRS bounds are then searched between them and the
    # R-peaks.
    tpeaks, ppeaks = _dwt_delinate_tp_peaks(
        ecg, rpeaks_resampled, dwtmatr, sampling_rate=analysis_sampling_rate, debug=False)
    qrs_onsets, qrs_offsets = _dwt_delinate_qrs_bounds(
        ecg, rpeaks_resampled, dwtmatr, ppeaks, tpeaks, sampling_rate=analysis_sampling_rate, debug=False)
    ponsets, poffsets = _dwt_delinate_tp_onsets_offsets(
        ecg, ppeaks, dwtmatr, sampling_rate=analysis_sampling_rate, debug=False)
    # T-wave bounds use a wider window and a different onset weight.
    tonsets, toffsets = _dwt_delinate_tp_onsets_offsets(
        ecg, tpeaks, dwtmatr, sampling_rate=analysis_sampling_rate, debug=False,
        onset_weight=0.6, duration=0.6
    )
    # Map every landmark back to the caller's sampling rate.
    return dict(
        ECG_T_Peaks=_dwt_resample_points(tpeaks, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_T_Onsets=_dwt_resample_points(tonsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_T_Offsets=_dwt_resample_points(toffsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_P_Peaks=_dwt_resample_points(ppeaks, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_P_Onsets=_dwt_resample_points(ponsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_P_Offsets=_dwt_resample_points(poffsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_R_Onsets=_dwt_resample_points(qrs_onsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
        ECG_R_Offsets=_dwt_resample_points(qrs_offsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate),
    )
def _dwt_compensate_degree(sampling_rate):
return int(np.log2(sampling_rate / 250))
def _dwt_delinate_tp_peaks(ecg, rpeaks, dwtmatr, sampling_rate=250, debug=False,
                           dwt_delay=0.0,
                           qrs_width=0.13,
                           p2r_duration=0.2,
                           rt_duration=0.25,
                           degree_tpeak=3,
                           degree_ppeak=2,
                           epsilon_T_weight=0.25,
                           epsilon_P_weight=0.02):
    """Locate T-peaks and P-peaks around each R-peak from DWT coefficients.

    For every R-peak, a search window after it (for T) and before it (for P)
    is scanned on one wavelet scale; candidate wave peaks are zero-crossings
    of the coefficients between a positive and a negative modulus maximum,
    and the candidate with the highest score (local ECG amplitude minus a
    distance penalty) is kept.

    Args:
        ecg: resampled ECG signal.
        rpeaks: R-peak sample indices (np.nan allowed for missing peaks).
        dwtmatr: multiscale DWT coefficients from _dwt_compute_multiscales.
        sampling_rate: sampling rate of `ecg` (Hz).
        debug: unused; kept for API compatibility.
        dwt_delay: unused; kept for API compatibility.
        qrs_width: assumed QRS width (s), used to skip the QRS itself.
        p2r_duration: assumed P-to-R distance (s).
        rt_duration: assumed R-to-T distance (s).
        degree_tpeak: base wavelet scale for the T search.
        degree_ppeak: base wavelet scale for the P search.
        epsilon_T_weight: RMS multiplier for the T find_peaks height threshold.
        epsilon_P_weight: RMS multiplier for the P find_peaks height threshold.

    Returns:
        (tpeaks, ppeaks): lists of sample indices aligned with `rpeaks`,
        np.nan where no peak was found.
    """
    srch_bndry = int(0.5 * qrs_width * sampling_rate)
    degree_add = _dwt_compensate_degree(sampling_rate)
    tpeaks = []
    for i in range(len(rpeaks)):
        if np.isnan(rpeaks[i]):
            tpeaks.append(np.nan)
            continue
        # search for T peaks from R peaks
        srch_idx_start = rpeaks[i] + srch_bndry
        srch_idx_end = rpeaks[i] + 2 * int(rt_duration * sampling_rate)
        dwt_local = dwtmatr[degree_tpeak + degree_add, srch_idx_start:srch_idx_end]
        if len(dwt_local) == 0:
            # Empty window (R-peak too close to the end of the record).
            # Bug fix: this guard now runs BEFORE the RMS threshold below —
            # np.mean of an empty slice emits a RuntimeWarning and yields NaN.
            tpeaks.append(np.nan)
            continue
        height = epsilon_T_weight * np.sqrt(np.mean(np.square(dwt_local)))
        ecg_local = ecg[srch_idx_start:srch_idx_end]
        peaks, peak_heights = scipy.signal.find_peaks(np.abs(dwt_local), height=height)
        # Drop modulus maxima that are tiny relative to the window maximum.
        peaks = list(filter(lambda p: np.abs(dwt_local[p]) > 0.025 * max(dwt_local), peaks))
        if dwt_local[0] > 0:  # just append
            peaks = [0] + peaks
        # detect morphology: a positive maximum followed by a negative one
        # brackets a zero-crossing, i.e. a candidate wave peak.
        candidate_peaks = []
        candidate_peaks_scores = []
        for idx_peak, idx_peak_nxt in zip(peaks[:-1], peaks[1:]):
            correct_sign = dwt_local[idx_peak] > 0 and dwt_local[idx_peak_nxt] < 0
            if correct_sign:
                idx_zero = signal_zerocrossings(dwt_local[idx_peak: idx_peak_nxt])[0] + idx_peak
                # This is the score assigned to each peak. The peak with the
                # highest score will be selected.
                score = ecg_local[idx_zero] \
                    - (float(idx_zero) / sampling_rate - (rt_duration - 0.5 * qrs_width))
                candidate_peaks.append(idx_zero)
                candidate_peaks_scores.append(score)
        if len(candidate_peaks) == 0:
            tpeaks.append(np.nan)
            continue
        tpeaks.append(candidate_peaks[np.argmax(candidate_peaks_scores)] + srch_idx_start)
    ppeaks = []
    for i in range(len(rpeaks)):
        if np.isnan(rpeaks[i]):
            ppeaks.append(np.nan)
            continue
        # search for P peaks from Rpeaks
        # NOTE(review): srch_idx_start can become negative near the start of
        # the record, which changes the slice semantics — confirm upstream
        # guarantees enough lead-in samples.
        srch_idx_start = rpeaks[i] - 2 * int(p2r_duration * sampling_rate)
        srch_idx_end = rpeaks[i] - srch_bndry
        dwt_local = dwtmatr[degree_ppeak + degree_add, srch_idx_start:srch_idx_end]
        if len(dwt_local) == 0:
            # Same empty-window guard as above, moved before the RMS height.
            ppeaks.append(np.nan)
            continue
        height = epsilon_P_weight * np.sqrt(np.mean(np.square(dwt_local)))
        ecg_local = ecg[srch_idx_start:srch_idx_end]
        peaks, peak_heights = scipy.signal.find_peaks(np.abs(dwt_local), height=height)
        peaks = list(filter(lambda p: np.abs(dwt_local[p]) > 0.025 * max(dwt_local), peaks))
        if dwt_local[0] > 0:  # just append
            peaks = [0] + peaks
        # detect morphology
        candidate_peaks = []
        candidate_peaks_scores = []
        for idx_peak, idx_peak_nxt in zip(peaks[:-1], peaks[1:]):
            correct_sign = dwt_local[idx_peak] > 0 and dwt_local[idx_peak_nxt] < 0
            if correct_sign:
                idx_zero = signal_zerocrossings(dwt_local[idx_peak: idx_peak_nxt])[0] + idx_peak
                # This is the score assigned to each peak. The peak with the
                # highest score will be selected.
                score = ecg_local[idx_zero] \
                    - abs(float(idx_zero) / sampling_rate - p2r_duration)  # Minus p2r because of the srch_idx_start
                candidate_peaks.append(idx_zero)
                candidate_peaks_scores.append(score)
        if len(candidate_peaks) == 0:
            ppeaks.append(np.nan)
            continue
        ppeaks.append(candidate_peaks[np.argmax(candidate_peaks_scores)] + srch_idx_start)
    return tpeaks, ppeaks
def _dwt_delinate_tp_onsets_offsets(ecg, peaks, dwtmatr, sampling_rate=250, debug=False,
                                    duration=0.3,
                                    duration_offset=0.3,
                                    onset_weight=0.4,
                                    offset_weight=0.4,
                                    degree_onset=2,
                                    degree_offset=2):
    """Find the onset and offset of each P/T wave from the DWT slope maxima.

    For every peak, the onset is the last sample before the preceding slope
    maximum where the coefficient drops below `onset_weight` times that
    maximum; the offset is found symmetrically after the peak using
    `offset_weight`.

    Args:
        ecg: resampled ECG signal (only referenced by commented-out debug code).
        peaks: wave-peak sample indices (np.nan allowed).
        dwtmatr: multiscale DWT coefficients.
        sampling_rate: sampling rate of `ecg` (Hz).
        debug: unused; kept for API compatibility.
        duration: onset search-window length (s) before the peak.
        duration_offset: offset search-window length (s) after the peak.
        onset_weight: fraction of the slope maximum used as onset threshold.
        offset_weight: fraction of the slope maximum used as offset threshold.
        degree_onset: base wavelet scale for the onset search.
        degree_offset: base wavelet scale for the offset search.

    Returns:
        (onsets, offsets): lists aligned with `peaks`, np.nan where a bound
        could not be determined.
    """
    degree = _dwt_compensate_degree(sampling_rate)
    onsets = []
    offsets = []
    for i in range(len(peaks)):
        if np.isnan(peaks[i]):
            onsets.append(np.nan)
            offsets.append(np.nan)
            continue
        # look for onsets
        srch_idx_start = peaks[i] - int(duration * sampling_rate)
        srch_idx_end = peaks[i]
        # NOTE(review): defensive and effectively dead — peaks[i] already
        # passed the isnan check above, so neither bound can be NaN here.
        if srch_idx_start is np.nan or srch_idx_end is np.nan:
            onsets.append(np.nan)
            offsets.append(np.nan)
            continue
        dwt_local = dwtmatr[degree_onset + degree, srch_idx_start: srch_idx_end]
        onset_slope_peaks, onset_slope_data = scipy.signal.find_peaks(dwt_local)
        try:
            # Threshold relative to the last (closest-to-peak) slope maximum;
            # IndexError covers both "no slope maximum" and "no candidate".
            epsilon_onset = onset_weight * dwt_local[onset_slope_peaks[-1]]
            candidate_onsets = np.where(dwt_local[:onset_slope_peaks[-1]] < epsilon_onset)[0]
            onsets.append(candidate_onsets[-1] + srch_idx_start)
        except IndexError:
            onsets.append(np.nan)
        # # only for debugging
        # events_plot([candidate_onsets, onset_slope_peaks], dwt_local)
        # plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
        # plt.show()
        # look for offset
        srch_idx_start = peaks[i]
        srch_idx_end = peaks[i] + int(duration_offset * sampling_rate)
        if srch_idx_start is np.nan or srch_idx_end is np.nan:
            offsets.append(np.nan)
            continue
        dwt_local = dwtmatr[degree_offset + degree, srch_idx_start: srch_idx_end]
        offset_slope_peaks, offset_slope_data = scipy.signal.find_peaks(-dwt_local)
        try:
            # Threshold relative to the first (closest-to-peak) downward slope
            # maximum after the peak.
            epsilon_offset = - offset_weight * dwt_local[offset_slope_peaks[0]]
            candidate_offsets = np.where(-dwt_local[offset_slope_peaks[0]:] < epsilon_offset)[0] + offset_slope_peaks[0]
            offsets.append(candidate_offsets[0] + srch_idx_start)
        except IndexError:
            offsets.append(np.nan)
        # # only for debugging
        # events_plot([candidate_offsets, offset_slope_peaks], dwt_local)
        # plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
        # plt.show()
    return onsets, offsets
def _dwt_delinate_qrs_bounds(ecg, rpeaks, dwtmatr, ppeaks, tpeaks, sampling_rate=250, debug=False):
degree = int(np.log2(sampling_rate / 250))
onsets = []
for i in range(len(rpeaks)):
# look for onsets
srch_idx_start = ppeaks[i]
srch_idx_end = rpeaks[i]
if srch_idx_start is np.nan or srch_idx_end is np.nan:
onsets.append(np.nan)
continue
dwt_local = dwtmatr[2 + degree, srch_idx_start: srch_idx_end]
onset_slope_peaks, onset_slope_data = scipy.signal.find_peaks(-dwt_local)
epsilon_onset = 0.5 * -dwt_local[onset_slope_peaks[-1]]
candidate_onsets = np.where(- dwt_local[:onset_slope_peaks[-1]] < epsilon_onset)[0]
onsets.append(candidate_onsets[-1] + srch_idx_start)
# # only for debugging
# events_plot(candidate_onsets, -dwt_local)
# plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
# plt.legend()
# plt.show()
offsets = []
for i in range(len(rpeaks)):
# look for offsets
srch_idx_start = rpeaks[i]
srch_idx_end = tpeaks[i]
if srch_idx_start is np.nan or srch_idx_end is np.nan:
offsets.append(np.nan)
continue
dwt_local = dwtmatr[2 + degree, srch_idx_start: srch_idx_end]
onset_slope_peaks, onset_slope_data = scipy.signal.find_peaks(dwt_local)
if len(onset_slope_peaks) == 0:
offsets.append(np.nan)
continue
epsilon_offset = 0.5 * dwt_local[onset_slope_peaks[0]]
if not (dwt_local[onset_slope_peaks[0]:] < epsilon_offset).any():
offsets.append(np.nan)
continue
candidate_offsets = np.where(dwt_local[onset_slope_peaks[0]:] < epsilon_offset)[0] + onset_slope_peaks[0]
offsets.append(candidate_offsets[0] + srch_idx_start)
return onsets, offsets
def _dwt_compute_multiscales(ecg: np.ndarray, max_degree):
"""Return multiscales wavelet transforms.
Args:
ecg (FIXME): FIXME
max_degree (FIXME): FIXME
Returns:
out (FIXME): FIXME
"""
def _apply_H_filter(signal_i, power=0):
zeros = np.zeros(2 ** power - 1)
timedelay = 2 ** power
banks = np.r_[
1.0 / 8, zeros, 3.0 / 8, zeros, 3.0 / 8, zeros, 1.0 / 8,
]
signal_f = scipy.signal.convolve(signal_i, banks, mode='full')
signal_f[:-timedelay] = signal_f[timedelay:] # timeshift: 2 steps
return signal_f
def _apply_G_filter(signal_i, power=0):
zeros = np.zeros(2 ** power - 1)
timedelay = 2 ** power
banks = np.r_[2, zeros, -2]
signal_f = scipy.signal.convolve(signal_i, banks, mode='full')
signal_f[:-timedelay] = signal_f[timedelay:] # timeshift: 1 step
return signal_f
dwtmatr = []
intermediate_ret = np.array(ecg)
for deg in range(max_degree):
S_deg = _apply_G_filter(intermediate_ret, power=deg)
T_deg = _apply_H_filter(intermediate_ret, power=deg)
dwtmatr.append(S_deg)
intermediate_ret = np.array(T_deg)
dwtmatr = [arr[:len(ecg)] for arr in dwtmatr] # rescale transforms to the same length
return np.array(dwtmatr)
# =============================================================================
# WAVELET METHOD (CWT)
# =============================================================================
def _ecg_delinator_cwt(ecg, rpeaks=None, sampling_rate=1000):
    """Delineate an ECG with the continuous-wavelet-transform method.

    First locates the P- and T-peaks between consecutive R-peaks, then
    searches for the onset/offset of the QRS complex, the P wave and the
    T wave around their respective peaks.

    Returns a dict mapping landmark names to arrays of sample indices.
    """
    # Peaks first: every subsequent bound search is anchored on one of them.
    t_peaks, p_peaks = _peaks_delineator(ecg, rpeaks,
                                         sampling_rate=sampling_rate)
    r_on, r_off = _onset_offset_delineator(ecg, rpeaks,
                                           peak_type="rpeaks",
                                           sampling_rate=sampling_rate)
    p_on, p_off = _onset_offset_delineator(ecg, p_peaks,
                                           peak_type="ppeaks",
                                           sampling_rate=sampling_rate)
    t_on, t_off = _onset_offset_delineator(ecg, t_peaks,
                                           peak_type="tpeaks",
                                           sampling_rate=sampling_rate)
    return {"ECG_P_Peaks": p_peaks,
            "ECG_T_Peaks": t_peaks,
            "ECG_R_Onsets": r_on,
            "ECG_R_Offsets": r_off,
            "ECG_P_Onsets": p_on,
            "ECG_P_Offsets": p_off,
            "ECG_T_Onsets": t_on,
            "ECG_T_Offsets": t_off}
# Internals
# ---------------------
def _onset_offset_delineator(ecg, peaks, peak_type="rpeaks", sampling_rate=1000):
    """Find the onset and offset of each wave peak via CWT slope maxima.

    Around each peak, slope maxima of the wavelet coefficients are located
    with find_peaks, and the bound is placed where the coefficient falls
    below a fraction (epsilon) of the nearest maximum, following Martinez
    et al. (2004).

    Args:
        ecg: cleaned ECG signal.
        peaks: sample indices of the peaks to bound.
        peak_type: "rpeaks", "ppeaks" or "tpeaks"; selects the wavelet scale
            and the epsilon fractions.
        sampling_rate: sampling rate of `ecg` (Hz).

    Returns:
        (onsets, offsets): int arrays of sample indices.

    NOTE(review): when no slope peak is found for a peak's onset, the
    `continue` also skips that peak's offset search, so `onsets` and
    `offsets` can end up shorter than `peaks` and misaligned with each
    other — confirm intended.
    """
    # Try loading pywt
    try:
        import pywt
    except ImportError:
        raise ImportError("NeuroKit error: ecg_delineator(): the 'PyWavelets' "
                          "module is required for this method to run. ",
                          "Please install it first (`pip install PyWavelets`).")
    # first derivative of the Gaussian wavelet, at five dyadic scales
    scales = np.array([1, 2, 4, 8, 16])
    cwtmatr, freqs = pywt.cwt(ecg, scales, 'gaus1', sampling_period=1.0/sampling_rate)
    half_wave_width = int(0.1*sampling_rate)  # NEED TO CHECK
    onsets = []
    offsets = []
    for index_peak in peaks:
        # find onset
        if peak_type == "rpeaks":
            search_window = cwtmatr[2, index_peak - half_wave_width: index_peak]
            prominence = 0.20*max(search_window)
            height = 0.0
            wt_peaks, wt_peaks_data = find_peaks(search_window, height=height,
                                                 prominence=prominence)
        elif peak_type == "tpeaks" or peak_type == "ppeaks":
            search_window = - cwtmatr[4, index_peak - half_wave_width: index_peak]
            prominence = 0.10*max(search_window)
            height = 0.0
            wt_peaks, wt_peaks_data = find_peaks(search_window, height=height,
                                                 prominence=prominence)
        if len(wt_peaks) == 0:
            # print("Fail to find onset at index: %d", index_peak)
            continue
        # The last peak is nfirst in (Martinez, 2004)
        nfirst = wt_peaks[-1] + index_peak - half_wave_width
        if peak_type == "rpeaks":
            # NOTE(review): both branches test `> 0`, so the second is dead
            # code (likely one was meant to be `< 0`); epsilon_onset is also
            # left unbound when the last height is <= 0 — confirm against
            # Martinez (2004).
            if wt_peaks_data['peak_heights'][-1] > 0:
                epsilon_onset = 0.05 * wt_peaks_data['peak_heights'][-1]
            elif wt_peaks_data['peak_heights'][-1] > 0:
                epsilon_onset = 0.07 * wt_peaks_data['peak_heights'][-1]
        elif peak_type == "ppeaks":
            epsilon_onset = 0.50 * wt_peaks_data['peak_heights'][-1]
        elif peak_type == "tpeaks":
            epsilon_onset = 0.25 * wt_peaks_data['peak_heights'][-1]
        leftbase = wt_peaks_data['left_bases'][-1] + index_peak - half_wave_width
        if peak_type == "rpeaks":
            candidate_onsets = np.where(cwtmatr[2, nfirst-100: nfirst] <
                                        epsilon_onset)[0] + nfirst - 100
        elif peak_type == "tpeaks" or peak_type == "ppeaks":
            candidate_onsets = np.where(-cwtmatr[4, nfirst-100: nfirst] <
                                        epsilon_onset)[0] + nfirst - 100
        # The slope peak's left base always serves as a fallback candidate.
        candidate_onsets = candidate_onsets.tolist() + [leftbase]
        if len(candidate_onsets) == 0:
            onsets.append(np.nan)
        else:
            onsets.append(max(candidate_onsets))
        # find offset
        if peak_type == "rpeaks":
            search_window = - cwtmatr[2, index_peak: index_peak + half_wave_width]
            prominence = 0.50*max(search_window)
            # NOTE(review): `height` here reuses the value assigned in the
            # onset branch above — confirm intended.
            wt_peaks, wt_peaks_data = scipy.signal.find_peaks(search_window, height=height,
                                                              prominence=prominence)
        elif peak_type == "tpeaks" or peak_type == "ppeaks":
            search_window = cwtmatr[4, index_peak: index_peak + half_wave_width]
            prominence = 0.10*max(search_window)
            wt_peaks, wt_peaks_data = find_peaks(search_window, height=height,
                                                 prominence=prominence)
        if len(wt_peaks) == 0:
            # print("Fail to find offsets at index: %d", index_peak)
            continue
        nlast = wt_peaks[0] + index_peak
        if peak_type == "rpeaks":
            # NOTE(review): same duplicated `> 0` condition as in the onset
            # branch; the second test is dead code.
            if wt_peaks_data['peak_heights'][0] > 0:
                epsilon_offset = 0.125 * wt_peaks_data['peak_heights'][0]
            elif wt_peaks_data['peak_heights'][0] > 0:
                epsilon_offset = 0.71 * wt_peaks_data['peak_heights'][0]
        elif peak_type == "ppeaks":
            epsilon_offset = 0.9 * wt_peaks_data['peak_heights'][0]
        elif peak_type == "tpeaks":
            epsilon_offset = 0.4 * wt_peaks_data['peak_heights'][0]
        rightbase = wt_peaks_data['right_bases'][0] + index_peak
        if peak_type == "rpeaks":
            candidate_offsets = np.where((-cwtmatr[2, nlast: nlast + 100]) <
                                         epsilon_offset)[0] + nlast
        elif peak_type == "tpeaks" or peak_type == "ppeaks":
            candidate_offsets = np.where((cwtmatr[4, nlast: nlast + 100]) <
                                         epsilon_offset)[0] + nlast
        candidate_offsets = candidate_offsets.tolist() + [rightbase]
        if len(candidate_offsets) == 0:
            offsets.append(np.nan)
        else:
            offsets.append(min(candidate_offsets))
    # NOTE(review): converting with dtype='int' raises if any np.nan was
    # appended above — confirm whether the nan branches are reachable.
    onsets = np.array(onsets, dtype='int')
    offsets = np.array(offsets, dtype='int')
    return onsets, offsets
def _peaks_delineator(ecg, rpeaks, cleaning=False, sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_delineator(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).")
# first derivative of the Gaissian signal
scales = np.array([1, 2, 4, 8, 16])
cwtmatr, freqs = pywt.cwt(ecg, scales, 'gaus1', sampling_period=1.0/sampling_rate)
qrs_duration = 0.1
search_boundary = int(0.9 * qrs_duration * sampling_rate / 2)
significant_peaks_groups = []
tppeaks_pairs = []
tppeaks = []
for i in range(len(rpeaks)-1):
# search for T peaks and P peaks from R peaks
start = rpeaks[i] + search_boundary
end = rpeaks[i + 1] - search_boundary
search_window = cwtmatr[4, start:end]
height = 0.25*np.sqrt(np.mean(np.square(search_window)))
peaks_tp, heights_tp = scipy.signal.find_peaks(np.abs(search_window), height=height)
peaks_tp = peaks_tp + rpeaks[i] + search_boundary
# set threshold for heights of peaks to find significant peaks in wavelet
threshold = 0.125*max(search_window)
significant_index = []
significant_index = [j for j in range(len(peaks_tp)) if
heights_tp["peak_heights"][j] > threshold]
significant_peaks_tp = []
for index in significant_index:
significant_peaks_tp.append(peaks_tp[index])
significant_peaks_groups.append(_find_tppeaks(ecg, significant_peaks_tp, sampling_rate=sampling_rate))
tpeaks, ppeaks = zip(*[(g[0], g[-1]) for g in significant_peaks_groups])
tpeaks = np.array(tpeaks, dtype='int')
    ppeaks = np.array(ppeaks, dtype='int')
"""
NeuroLearn Statistics Tools
===========================
Tools to help with statistical analyses.
"""
__all__ = [
"pearson",
"zscore",
"fdr",
"holm_bonf",
"threshold",
"multi_threshold",
"winsorize",
"trim",
"calc_bpm",
"downsample",
"upsample",
"fisher_r_to_z",
"fisher_z_to_r",
"one_sample_permutation",
"two_sample_permutation",
"correlation_permutation",
"matrix_permutation",
"make_cosine_basis",
"summarize_bootstrap",
"regress",
"procrustes",
"procrustes_distance",
"align",
"find_spikes",
"correlation",
"distance_correlation",
"transform_pairwise",
"double_center",
"u_center",
"_bootstrap_isc",
"isc",
"isfc",
"isps",
"_compute_matrix_correlation",
"_phase_mean_angle",
"_phase_vector_length",
"_butter_bandpass_filter",
"_phase_rayleigh_p",
"align_states",
]
import numpy as np
from numpy.fft import fft, ifft
import pandas as pd
from scipy.stats import pearsonr, spearmanr, kendalltau, norm
from scipy.stats import t as t_dist
from scipy.spatial.distance import squareform, pdist
from scipy.linalg import orthogonal_procrustes
from scipy.spatial import procrustes as procrust
from scipy.signal import hilbert, butter, filtfilt
from scipy.optimize import linear_sum_assignment
from copy import deepcopy
import nibabel as nib
from scipy.interpolate import interp1d
import warnings
import itertools
from joblib import Parallel, delayed
from .utils import attempt_to_import, check_square_numpy_matrix
from .external.srm import SRM, DetSRM
from sklearn.utils import check_random_state
from sklearn.metrics import pairwise_distances
MAX_INT = np.iinfo(np.int32).max
# Optional dependencies
sm = attempt_to_import("statsmodels.tsa.arima_model", name="sm")
def pearson(x, y):
    """Correlate row vector x with each row vector in 2D array y.

    Returns a 1-D array with one Pearson r per row of y.
    (Adapted from neurosynth.stats.)
    """
    stacked = np.vstack((x, y))
    # Center every row, then correlate row 0 against the rest.
    centered = stacked - stacked.mean(axis=1)[:, None]
    norms = np.sqrt((centered * centered).sum(axis=1))
    dots = centered[1:].dot(centered[0].T)
    return dots / (norms[1:] * norms[0])
def zscore(df):
    """Z-score every column of a pandas DataFrame, or a pandas Series.

    Args:
        df: (pd.DataFrame or pd.Series) data to standardize

    Returns:
        z_data: z-scored object of the same type as the input

    NOTE(review): the DataFrame branch uses the sample std (pandas default,
    ddof=1) while the Series branch uses the population std (numpy default,
    ddof=0). Preserved as-is for backward compatibility — confirm intended.
    """
    if isinstance(df, pd.Series):
        return (df - np.mean(df)) / np.std(df)
    if isinstance(df, pd.DataFrame):
        def _standardize(col):
            return (col - col.mean()) / col.std()
        return df.apply(_standardize)
    raise ValueError("Data is not a Pandas DataFrame or Series instance")
def fdr(p, q=0.05):
    """Determine the FDR threshold for a p-value array at the desired false
    discovery rate q (Benjamini-Hochberg step-up procedure).

    Args:
        p: (np.array) vector of p-values
        q: (float) false discovery rate level

    Returns:
        fdr_p: (float) p-value threshold based on independence or positive
            dependence; -1 when no p-value survives correction

    Raises:
        ValueError: if p is not a numpy array or contains values outside [0, 1].
    """
    if not isinstance(p, np.ndarray):
        raise ValueError("Make sure vector of p-values is a numpy array")
    # Bug fix: a second, unreachable range check duplicated this validation
    # with a different message; one check suffices.
    if any(p < 0) or any(p > 1):
        raise ValueError("array contains p-values that are outside the range 0-1")
    # Compare sorted p-values against the BH line i*q/m; the threshold is
    # the largest p-value that falls below the line.
    s = np.sort(p)
    nvox = p.shape[0]
    null = np.arange(1, nvox + 1, dtype="float") * q / nvox
    below = np.where(s <= null)[0]
    return s[max(below)] if len(below) else -1
def holm_bonf(p, alpha=0.05):
    """Compute the corrected p-value threshold using the Holm-Bonferroni
    method, i.e. a step-down procedure applying iteratively less correction
    to higher p-values. More conservative than FDR but more powerful than
    vanilla Bonferroni.

    Args:
        p: (np.array) vector of p-values
        alpha: (float) family-wise alpha level

    Returns:
        bonf_p: (float) p-value threshold based on the Bonferroni step-down
            procedure; -1 when no p-value survives correction

    Raises:
        ValueError: if p is not a numpy array.
    """
    if not isinstance(p, np.ndarray):
        raise ValueError("Make sure vector of p-values is a numpy array")
    s = np.sort(p)
    nvox = p.shape[0]
    # Bug fix: the step-down criterion previously hard-coded 0.05 and
    # silently ignored the `alpha` argument.
    null = alpha / (nvox - np.arange(1, nvox + 1) + 1)
    below = np.where(s <= null)[0]
    return s[max(below)] if len(below) else -1
def threshold(stat, p, thr=0.05, return_mask=False):
    """Threshold a statistic image by p-values from a p image.

    Args:
        stat: (Brain_Data) Brain_Data instance of an arbitrary statistic
            metric (e.g., beta, t, etc)
        p: (Brain_Data) Brain_Data instance of p-values
        thr: (float) p-value at which to threshold the stat image
            (default: 0.05)
        return_mask: (bool) optionally also return the thresholding mask;
            default False

    Returns:
        out: thresholded Brain_Data instance (and the binary mask as a
        second element when return_mask is True)
    """
    from nltools.data import Brain_Data
    if not isinstance(stat, Brain_Data):
        raise ValueError("Make sure stat is a Brain_Data instance")
    if not isinstance(p, Brain_Data):
        raise ValueError("Make sure p is a Brain_Data instance")
    # Create Mask: voxels whose p-value is below the threshold.
    mask = deepcopy(p)
    if thr > 0:
        mask.data = (mask.data < thr).astype(int)
    else:
        # Non-positive threshold: nothing can survive, use an empty mask.
        mask.data = np.zeros(len(mask.data), dtype=int)
    # Apply Threshold Mask; an all-zero mask yields an all-zero output.
    out = deepcopy(stat)
    if np.sum(mask.data) > 0:
        out = out.apply_mask(mask)
        out.data = out.data.squeeze()
    else:
        out.data = np.zeros(len(mask.data), dtype=int)
    if return_mask:
        return out, mask
    else:
        return out
def multi_threshold(t_map, p_map, thresh):
    """Threshold a statistic image at multiple p-value levels and sum the
    results: the magnitude of each voxel encodes how many thresholds it
    survives, with the sign of the underlying statistic preserved.

    Args:
        t_map: (Brain_Data) Brain_Data instance of an arbitrary statistic
            metric (e.g., beta, t, etc)
        p_map: (Brain_Data) Brain_Data instance of p-values
        thresh: (list) list of p-values to threshold the stat image at

    Returns:
        out: thresholded Brain_Data instance
    """
    from nltools.data import Brain_Data
    if not isinstance(t_map, Brain_Data):
        raise ValueError("Make sure stat is a Brain_Data instance")
    if not isinstance(p_map, Brain_Data):
        raise ValueError("Make sure p is a Brain_Data instance")
    if not isinstance(thresh, list):
        raise ValueError("Make sure thresh is a list of p-values")
    # NOTE(review): get_affine()/get_data() are deprecated and removed in
    # nibabel >= 3 — confirm the pinned nibabel version supports them.
    affine = t_map.to_nifti().get_affine()
    pos_out = np.zeros(t_map.to_nifti().shape)
    neg_out = deepcopy(pos_out)
    for thr in thresh:
        t = threshold(t_map, p_map, thr=thr)
        # Binary masks of surviving positive / negative voxels at this level.
        t_pos = deepcopy(t)
        t_pos.data = np.zeros(len(t_pos.data))
        t_neg = deepcopy(t_pos)
        t_pos.data[t.data > 0] = 1
        t_neg.data[t.data < 0] = 1
        pos_out = pos_out + t_pos.to_nifti().get_data()
        neg_out = neg_out + t_neg.to_nifti().get_data()
    # Combine: positive survival counts minus negative survival counts.
    pos_out = pos_out + neg_out * -1
    return Brain_Data(nib.Nifti1Image(pos_out, affine))
def winsorize(data, cutoff=None, replace_with_cutoff=True):
    """Winsorize a pandas DataFrame or Series: clamp outliers instead of
    removing them.

    Args:
        data: (pd.DataFrame, pd.Series) data to winsorize
        cutoff: (dict) a dictionary with keys {'std':[low,high]} or
            {'quantile':[low,high]}
        replace_with_cutoff: (bool) If True, replace outliers with the
            cutoff value. If False, replace outliers with the closest
            existing non-outlier value. (default: True)

    Returns:
        out: (pd.DataFrame, pd.Series) winsorized data
    """
    return _transform_outliers(
        data, cutoff, replace_with_cutoff=replace_with_cutoff, method="winsorize"
    )
def trim(data, cutoff=None):
    """Trim a pandas DataFrame or Series by replacing outlier values with NaNs.

    Args:
        data: (pd.DataFrame, pd.Series) data to trim
        cutoff: (dict) a dictionary with keys {'std':[low,high]} or
            {'quantile':[low,high]}

    Returns:
        out: (pd.DataFrame, pd.Series) trimmed data
    """
    # replace_with_cutoff is irrelevant for trimming (values become NaN).
    return _transform_outliers(data, cutoff, replace_with_cutoff=None, method="trim")
def _transform_outliers(data, cutoff, replace_with_cutoff, method):
    """Shared implementation behind `trim` and `winsorize`; not exposed to
    the user.

    Args:
        data: (pd.DataFrame, pd.Series) data to transform
        cutoff: (dict) a dictionary with keys {'std':[low,high]} or
            {'quantile':[low,high]}
        replace_with_cutoff: (bool) winsorize only — if True, replace
            outliers with the cutoff value; if False, replace them with the
            closest existing non-outlier value
        method: 'winsorize' or 'trim'

    Returns:
        out: (pd.DataFrame, pd.Series) transformed data
    """
    df = data.copy()  # To not overwrite data make a copy
    def _transform_outliers_sub(data, cutoff, replace_with_cutoff, method="trim"):
        # Operates on a single Series; applied per column for DataFrames.
        if not isinstance(data, pd.Series):
            raise ValueError(
                "Make sure that you are applying winsorize to a pandas dataframe or series."
            )
        if isinstance(cutoff, dict):
            # calculate cutoff values (q holds [lower, upper] bounds)
            if "quantile" in cutoff:
                q = data.quantile(cutoff["quantile"])
            elif "std" in cutoff:
                std = [
                    data.mean() - data.std() * cutoff["std"][0],
                    data.mean() + data.std() * cutoff["std"][1],
                ]
                q = pd.Series(index=cutoff["std"], data=std)
            # if replace_with_cutoff is false, replace with true existing values closest to cutoff
            if method == "winsorize" and not replace_with_cutoff:
                q.iloc[0] = data[data > q.iloc[0]].min()
                q.iloc[1] = data[data < q.iloc[1]].max()
        else:
            raise ValueError("cutoff must be a dictionary with quantile or std keys.")
        if method == "trim":
            # Outliers become NaN.
            data[data < q.iloc[0]] = np.nan
            data[data > q.iloc[1]] = np.nan
        elif method == "winsorize":
            # Outliers are clamped to the cutoff (or nearest kept value).
            if isinstance(q, pd.Series) and len(q) == 2:
                data[data < q.iloc[0]] = q.iloc[0]
                data[data > q.iloc[1]] = q.iloc[1]
        # NOTE(review): any other `method` value silently returns the data
        # unchanged; `q` is also unbound if cutoff has neither expected key.
        return data
    # transform each column if a dataframe, if series just transform data
    if isinstance(df, pd.DataFrame):
        for col in df.columns:
            df.loc[:, col] = _transform_outliers_sub(
                df.loc[:, col],
                cutoff=cutoff,
                replace_with_cutoff=replace_with_cutoff,
                method=method,
            )
        return df
    elif isinstance(df, pd.Series):
        return _transform_outliers_sub(
            df, cutoff=cutoff, replace_with_cutoff=replace_with_cutoff, method=method
        )
    else:
        raise ValueError("Data must be a pandas DataFrame or Series")
def calc_bpm(beat_interval, sampling_freq):
    """Instantaneous beats-per-minute from a beat-to-beat interval.

    Args:
        beat_interval: (int) number of samples between successive beats
            (typically the R-R interval)
        sampling_freq: (float) sampling frequency in Hz

    Returns:
        bpm: (float) beats per minute for the time interval
    """
    beats_per_second = sampling_freq * (1 / (beat_interval))
    return 60 * beats_per_second
def downsample(
    data, sampling_freq=None, target=None, target_type="samples", method="mean"
):
    """Downsample a pandas object to a target frequency or sample count by
    aggregating consecutive groups of samples.

    Args:
        data: (pd.DataFrame, pd.Series) data to downsample
        sampling_freq: (float) sampling frequency of data in hertz
        target: (float) downsampling target
        target_type: type of target, one of [samples, seconds, hz]
        method: (str) type of downsample method ['mean','median'],
            default: mean

    Returns:
        out: (pd.DataFrame, pd.Series) downsampled data
    """
    if not isinstance(data, (pd.DataFrame, pd.Series)):
        raise ValueError("Data must by a pandas DataFrame or Series instance.")
    if not (method == "median") | (method == "mean"):
        raise ValueError("Metric must be either 'mean' or 'median' ")
    if target_type == "samples":
        group_len = target
    elif target_type == "seconds":
        group_len = target * sampling_freq
    elif target_type == "hz":
        group_len = sampling_freq / target
    else:
        raise ValueError('Make sure target_type is "samples", "seconds", ' ' or "hz".')
    # Each run of `group_len` consecutive rows shares one group label;
    # trailing rows that don't fill a complete group get one extra label.
    labels = np.sort(np.repeat(np.arange(1, data.shape[0] / group_len, 1), group_len))
    if data.shape[0] > len(labels):
        tail = np.repeat(labels[-1] + 1, data.shape[0] - len(labels))
        labels = np.concatenate([labels, tail])
    grouped = data.groupby(labels)
    reduced = grouped.mean() if method == "mean" else grouped.median()
    return reduced.reset_index(drop=True)
def upsample(
    data, sampling_freq=None, target=None, target_type="samples", method="linear"
):
    """Upsample pandas to a new target frequency or number of samples using interpolation.

    Args:
        data: (pd.DataFrame, pd.Series) data to upsample
            (Note: will drop non-numeric columns from DataFrame)
        sampling_freq: Sampling frequency of data in hertz
        target: (float) upsampling target
        target_type: (str) type of target can be [samples,seconds,hz]
        method: (str) ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic']
            where 'zero', 'slinear', 'quadratic' and 'cubic'
            refer to a spline interpolation of zeroth, first,
            second or third order (default: linear)

    Returns:
        upsampled pandas object (np.ndarray for Series input)

    Raises:
        ValueError: for an unknown method/target_type or non-pandas input
    """
    methods = ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]
    if method not in methods:
        raise ValueError(
            "Method must be 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'"
        )
    if target_type == "samples":
        n_samples = target
    elif target_type == "seconds":
        n_samples = target * sampling_freq
    elif target_type == "hz":
        n_samples = float(sampling_freq) / float(target)
    else:
        raise ValueError('Make sure target_type is "samples", "seconds", or "hz".')
    # New sample positions expressed in units of the original sample index.
    orig_spacing = np.arange(0, data.shape[0], 1)
    new_spacing = np.arange(0, data.shape[0] - 1, n_samples)
    if isinstance(data, pd.Series):
        interpolate = interp1d(orig_spacing, data, kind=method)
        return interpolate(new_spacing)
    elif isinstance(data, pd.DataFrame):
        numeric_data = data._get_numeric_data()
        if data.shape[1] != numeric_data.shape[1]:
            warnings.warn(
                "Dropping %s non-numeric columns"
                % (data.shape[1] - numeric_data.shape[1]),
                UserWarning,
            )
        # .items() replaces DataFrame.iteritems(), which was removed in
        # pandas 2.0. Building the frame from a dict also avoids assigning
        # arrays into an empty frame via .loc, which newer pandas rejects.
        out = pd.DataFrame(
            {
                name: interp1d(orig_spacing, col, kind=method)(new_spacing)
                for name, col in numeric_data.items()
            },
            columns=numeric_data.columns,
        )
        return out
    else:
        raise ValueError("Data must by a pandas DataFrame or Series instance.")
def fisher_r_to_z(r):
    """Convert a correlation r into a z score via the Fisher transformation
    (z = arctanh(r) = 0.5 * log((1 + r) / (1 - r)))."""
    return np.arctanh(r)
def fisher_z_to_r(z):
    """Invert the Fisher transformation: convert a z score back to a
    correlation (r = tanh(z)).

    Note: the original docstring incorrectly described the r -> z direction.
    """
    return np.tanh(z)
def correlation(data1, data2, metric="pearson"):
    """Calculate the correlation between data1 and data2.

    Args:
        data1: (np.array) x
        data2: (np.array) y
        metric: (str) type of correlation ["spearman" or "pearson" or "kendall"]

    Returns:
        tuple: (correlation value, p-value)
    """
    dispatch = {"spearman": spearmanr, "pearson": pearsonr, "kendall": kendalltau}
    if metric not in dispatch:
        raise ValueError('metric must be "spearman" or "pearson" or "kendall"')
    return dispatch[metric](data1, data2)
def _permute_sign(data, random_state=None):
    """Mean of data after randomly flipping the sign of each element
    (one sign-permutation draw)."""
    rng = check_random_state(random_state)
    signs = rng.choice([1, -1], len(data))
    return np.mean(data * signs)
def _permute_group(data, random_state=None):
random_state = check_random_state(random_state)
perm_label = random_state.permutation(data["Group"])
return | np.mean(data.loc[perm_label == 1, "Values"]) | numpy.mean |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.sparse as sp
import GridCal.Engine.Core.topology as tp
class BatteryData:
    """Array-based container for nbatt battery devices connected to a grid of
    nbus buses over ntime time steps."""

    def __init__(self, nbatt, nbus, ntime=1):
        """
        :param nbatt: number of batteries
        :param nbus: number of buses
        :param ntime: number of time steps (default 1, i.e. snapshot)
        """
        self.nbatt = nbatt
        self.ntime = ntime
        # Static per-device data (length nbatt).
        self.battery_names = np.empty(nbatt, dtype=object)
        self.battery_controllable = np.zeros(nbatt, dtype=bool)
        self.battery_installed_p = np.zeros(nbatt)
        # Time-dependent per-device data (nbatt x ntime).
        self.battery_active = np.zeros((nbatt, ntime), dtype=bool)
        self.battery_p = np.zeros((nbatt, ntime))   # active power
        self.battery_pf = np.zeros((nbatt, ntime))  # power factor
        self.battery_v = np.zeros((nbatt, ntime))   # voltage control value
        self.battery_qmin = np.zeros(nbatt)
        self.battery_qmax = np.zeros(nbatt)
        # Bus-to-battery connectivity matrix (nbus x nbatt).
        self.C_bus_batt = sp.lil_matrix((nbus, nbatt), dtype=int)

    def slice(self, elm_idx, bus_idx, time_idx=None):
        """
        Return a copy restricted to the given batteries and buses, and
        optionally to the given time steps.

        :param elm_idx: battery indices to keep
        :param bus_idx: bus indices to keep
        :param time_idx: time indices to keep (None keeps per-time arrays
            indexed by element only)
        :return: new BatteryData with the selected sub-arrays
        """
        # Per-time arrays are indexed either by element alone or by the
        # (element, time) cross product.
        if time_idx is None:
            tidx = elm_idx
        else:
            tidx = np.ix_(elm_idx, time_idx)
        data = BatteryData(nbatt=len(elm_idx), nbus=len(bus_idx))
        data.battery_names = self.battery_names[elm_idx]
        data.battery_controllable = self.battery_controllable[elm_idx]
        data.battery_active = self.battery_active[tidx]
        data.battery_p = self.battery_p[tidx]
        data.battery_pf = self.battery_pf[tidx]
        data.battery_v = self.battery_v[tidx]
        data.battery_qmin = self.battery_qmin[elm_idx]
        data.battery_qmax = self.battery_qmax[elm_idx]
        data.C_bus_batt = self.C_bus_batt[np.ix_(bus_idx, elm_idx)]
        return data

    def get_island(self, bus_idx):
        # Batteries attached to the buses of the given island.
        return tp.get_elements_of_the_island(self.C_bus_batt.T, bus_idx)

    def get_injections(self):
        """
        Compute the active and reactive power of non-controlled batteries (assuming all).
        Q is derived from P and the power factor.
        :return: complex (nbatt x ntime) array P + jQ
        """
        pf2 = np.power(self.battery_pf, 2.0)
        # The 1e-20 offsets guard against division by zero when pf == 0.
        pf_sign = (self.battery_pf + 1e-20) / np.abs(self.battery_pf + 1e-20)
        Q = pf_sign * self.battery_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))
        return self.battery_p + 1.0j * Q

    def get_injections_per_bus(self):
        # Aggregate the injections of active devices onto their buses.
        return self.C_bus_batt * (self.get_injections() * self.battery_active)

    def get_bus_indices(self):
        # Bus index of each connectivity entry (CSC row indices).
        return self.C_bus_batt.tocsc().indices

    def get_voltages_per_bus(self):
        n_per_bus = self.C_bus_batt.sum(axis=1)
        n_per_bus[n_per_bus == 0] = 1
        # the division by n_per_bus achieves the averaging of the voltage control
        # value if more than 1 battery is present per bus
        # return self.C_bus_batt * (self.battery_v * self.battery_active) / n_per_bus
        return np.array((self.C_bus_batt * self.battery_v) / n_per_bus)

    def get_installed_power_per_bus(self):
        return self.C_bus_batt * self.battery_installed_p

    def get_qmax_per_bus(self):
        # reshape(-1, 1) broadcasts the per-device limit across time steps.
        return self.C_bus_batt * (self.battery_qmax.reshape(-1, 1) * self.battery_active)

    def get_qmin_per_bus(self):
        return self.C_bus_batt * (self.battery_qmin.reshape(-1, 1) * self.battery_active)

    def __len__(self):
        # Number of battery devices.
        return self.nbatt
class BatteryOpfData(BatteryData):
def __init__(self, nbatt, nbus, ntime=1):
"""
:param nbatt:
:param nbus:
:param ntime:
"""
BatteryData.__init__(self, nbatt, nbus, ntime)
self.battery_dispatchable = np.zeros(nbatt, dtype=bool)
self.battery_pmax = np.zeros(nbatt)
self.battery_pmin = np.zeros(nbatt)
self.battery_enom = np.zeros(nbatt)
self.battery_min_soc = np.zeros(nbatt)
self.battery_max_soc = np.zeros(nbatt)
self.battery_soc_0 = | np.zeros(nbatt) | numpy.zeros |
from collections import namedtuple
import netCDF4
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from cloudnetpy.categorize import atmos
from cloudnetpy.products.lwc import CloudAdjustor, Lwc, LwcError, LwcSource
# Dimension names shared by all variables written to the test netCDF file.
DIMENSIONS = ("time", "height", "model_time", "model_height")
# Values used as dimension coordinates; its length is the common dimension size.
TEST_ARRAY = np.arange(3)
# Lightweight stand-in for the categorize-bits structure used by LwcSourceObj below.
CategorizeBits = namedtuple("CategorizeBits", ["category_bits", "quality_bits"])
@pytest.fixture(scope="session")
def lwc_source_file(tmpdir_factory, file_metadata):
    """Write a minimal categorize-style netCDF file and return its path."""
    file_name = tmpdir_factory.mktemp("data").join("file.nc")
    with netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC") as root_grp:
        _create_dimensions(root_grp)
        _create_dimension_variables(root_grp)
        # Scalar site altitude (km).
        var = root_grp.createVariable("altitude", "f8")
        var[:] = 1
        var.units = "km"
        # Liquid water path and its error, one value per time step.
        var = root_grp.createVariable("lwp", "f8", "time")
        var[:] = [1, 1, 0.5]
        var = root_grp.createVariable("lwp_error", "f8", "time")
        var[:] = [0.2, 0.2, 0.1]
        # Rain rate per time step (0/1 values here).
        var = root_grp.createVariable("rain_rate", "i4", "time")
        var[:] = [0, 1, 1]
        # Bit fields consumed by the categorize readers.
        var = root_grp.createVariable("category_bits", "i4", "time")
        var[:] = [0, 1, 2]
        var = root_grp.createVariable("quality_bits", "i4", "time")
        var[:] = [8, 16, 32]
        # Model fields on the (time, height) grid.
        var = root_grp.createVariable("temperature", "f8", ("time", "height"))
        var[:] = np.array([[282, 280, 278], [286, 284, 282], [284, 282, 280]])
        var = root_grp.createVariable("pressure", "f8", ("time", "height"))
        var[:] = np.array([[1010, 1000, 990], [1020, 1010, 1000], [1030, 1020, 1010]])
    return file_name
def _create_dimensions(root_grp):
    """Register every test dimension on the netCDF group with a common length."""
    size = len(TEST_ARRAY)
    for name in DIMENSIONS:
        root_grp.createDimension(name, size)
def _create_dimension_variables(root_grp):
    """Create one coordinate variable per dimension, filled with TEST_ARRAY."""
    for name in DIMENSIONS:
        coord = root_grp.createVariable(name, "f8", (name,))
        coord[:] = TEST_ARRAY
        if name == "height":
            # Only height carries a units attribute in the fixture file.
            coord.units = "m"
def test_get_atmosphere_t(lwc_source_file):
    """Temperature should be read into the first atmosphere element."""
    source = LwcSource(lwc_source_file)
    expected = np.array([[282, 280, 278], [286, 284, 282], [284, 282, 280]])
    assert_array_equal(source.atmosphere[0], expected)
def test_get_atmosphere_p(lwc_source_file):
    """Pressure should be read into the last atmosphere element."""
    source = LwcSource(lwc_source_file)
    expected = np.array([[1010, 1000, 990], [1020, 1010, 1000], [1030, 1020, 1010]])
    assert_array_equal(source.atmosphere[-1], expected)
class LwcSourceObj(LwcSource):
    """Hand-built LwcSource test double holding only the attributes under test."""

    def __init__(self):
        # Deliberately does not call LwcSource.__init__, so no file is read.
        self.dheight = 10  # height grid spacing
        # Two profiles of three gates each.
        self.categorize_bits = CategorizeBits(
            category_bits={"droplet": np.asarray([[1, 0, 1], [0, 1, 1]], dtype=bool)},
            quality_bits={
                "radar": np.asarray([[1, 0, 1], [0, 1, 1]], dtype=bool),
                "lidar": np.asarray([[1, 0, 1], [0, 1, 1]], dtype=bool),
            },
        )
        # (temperature, pressure) pair matching the fixture file's fields.
        self.atmosphere = (
            np.array([[282, 281, 280], [280, 279, 278]]),
            np.array([[101000, 100500, 100000], [100000, 99500, 99000]]),
        )
        self.lwp = np.array([2, 0])
        self.lwp_error = np.array([0.1, 0.2])
        # Second profile is flagged as rainy.
        self.is_rain = np.array([0, 1])
# Shared instances exercised by the tests below, built from the in-memory
# LwcSourceObj double rather than a real categorize file.
LWC_OBJ = Lwc(LwcSourceObj())
STATUS_OBJ = CloudAdjustor(LwcSourceObj(), LWC_OBJ)
ERROR_OBJ = LwcError(LwcSourceObj(), LWC_OBJ)
@pytest.mark.parametrize("value", [0, 1])
def test_get_liquid(value):
    # The liquid mask should contain both 0 (no droplet) and 1 (droplet).
    assert value in LWC_OBJ.is_liquid
def test_init_lwc_adiabatic():
    """lwc_dz should be scaled by the height distance (in grid steps) above base."""
    lwc_source = LwcSourceObj()
    expected = atmos.fill_clouds_with_lwc_dz(lwc_source.atmosphere, LWC_OBJ.is_liquid)
    # Factor = (steps above cloud base) * dheight (10 m).
    for (row, col), factor in {(0, 0): 10, (0, 2): 10, (1, 1): 10, (1, 2): 20}.items():
        expected[row, col] *= factor
    assert_array_almost_equal(LWC_OBJ._init_lwc_adiabatic(), expected)
def test_screen_rain_lwc():
    """Profiles flagged as rainy should be fully masked in the lwc result."""
    expected_mask = [[0, 0, 0], [1, 1, 1]]
    expected = np.ma.array([[5, 1, 2], [3, 6, 0]], mask=expected_mask)
    assert_array_equal(expected.mask, LWC_OBJ.lwc.mask)
@pytest.mark.parametrize("value", [0, 1])
def test_init_status(value):
    # The initial status field should contain both status codes 0 and 1.
    assert value in STATUS_OBJ._init_status()
@pytest.mark.parametrize("key", ["radar", "lidar"])
def test_get_echo(key):
    # The echo dictionary should expose both instrument masks.
    assert key in STATUS_OBJ.echo.keys()
@pytest.mark.parametrize("value", [0, 1, 2])
def test_update_status(value):
    """Status grid should contain codes 0-2 after updating time index 0."""
    STATUS_OBJ._update_status(np.array([0]))
    assert value in STATUS_OBJ.status
@pytest.mark.parametrize("value", [0, 1, 2, 3])
def test_adjust_lwc(value):
    """Adjusting from time 0, base 0 should yield status codes 0-3."""
    STATUS_OBJ.status = np.array([[1, 0, 2], [0, 0, 2]])
    STATUS_OBJ._adjust_lwc(0, 0)
    assert value in STATUS_OBJ.status
def test_has_converged():
    """Time index 1 should be reported as converged."""
    assert STATUS_OBJ._has_converged(1) is True
def test_out_of_bound():
    """Index 2 lies outside the valid range."""
    assert STATUS_OBJ._out_of_bound(2) is True
def test_find_adjustable_clouds():
    # The adjustable-cloud result should not contain the value 1.
    assert 1 not in STATUS_OBJ._find_adjustable_clouds()
def test_find_topmost_clouds():
    # Expected topmost-cloud mask for the two test profiles.
    expected = np.asarray([[0, 0, 1], [0, 1, 1]], dtype=bool)
    assert_array_equal(STATUS_OBJ._find_topmost_clouds(), expected)
def test_find_echo_combinations_in_liquid():
STATUS_OBJ.echo["lidar"] = np.array([[0, 1, 0], [1, 1, 0]])
STATUS_OBJ.echo["radar"] = np.array([[0, 0, 0], [0, 1, 1]])
STATUS_OBJ.is_liquid = np.array([[1, 1, 1], [0, 1, 1]])
expected = | np.array([[0, 1, 0], [0, 3, 2]]) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
import torch
#################################################
# sinusoid regression, from original MAML paper #
#################################################
# Quote from the paper:
# the amplitude varies within [0.1, 5.0] and the phase varies within [0, π],
# and the input and output both have a dimensionality of 1.
# During training and testing, datapoints x are sampled uniformly from [−5.0, 5.0].
# The loss is the mean-squared error
# between the prediction f(x) and true value.
# The regressor is a neural network model with 2 hidden layers of size
# 40 with ReLU nonlinearities. When training with MAML,
# we use one gradient update with K = 10 examples with
# a fixed step size α = 0.01, and use Adam as the metaoptimizer"
def sinusoid_get_random_task():
    """Sample one sinusoid regression task as (amplitude, phase).

    Amplitude is uniform in [0.1, 5.0] and phase uniform in [0, pi],
    matching the setup quoted from the MAML paper above.
    """
    return np.random.uniform(0.1, 5.0), np.random.uniform(0, np.pi)
def sinusoid_get_random_task_batch(amplitude, phase, support_k, query_k):
    """Draw support/query sets for one sinusoid task y = amplitude * sin(x - phase).

    Inputs are sampled uniformly from [-5, 5]; tensors have shape (k, 1).
    """
    # Draw support first, then query, to keep the RNG stream identical.
    support_x = torch.from_numpy(np.random.uniform(-5.0, 5.0, (support_k, 1))).float()
    query_x = torch.from_numpy(np.random.uniform(-5.0, 5.0, (query_k, 1))).float()

    def targets(x):
        return amplitude * torch.sin(x - phase)

    return support_x, targets(support_x), query_x, targets(query_x)
def sinusoid_get_meta_batch(meta_batch_size, n_way, support_k, query_k, is_test=False):
    """Sample a meta-batch of sinusoid tasks.

    Returned tensors are shaped [meta_batch_size, k, 1]. n_way is ignored
    because sinusoid regression is always "1-way", and is_test is ignored
    because train and test tasks share the same distribution.
    """
    # Sample all task parameters first, then the per-task batches, preserving
    # the original order of RNG consumption.
    tasks = [sinusoid_get_random_task() for _ in range(meta_batch_size)]
    columns = ([], [], [], [])
    for amplitude, phase in tasks:
        task_batch = sinusoid_get_random_task_batch(amplitude, phase, support_k, query_k)
        for column, tensor in zip(columns, task_batch):
            column.append(tensor)
    support_x, support_y, query_x, query_y = (torch.stack(c) for c in columns)
    return support_x, support_y, query_x, query_y
# OMNIGLOT from MAML PAPER
# we also provide results for a non-convolutional network. For this, we use a
# network with 4 hidden layers with sizes 256, 128, 64, 64,
# each including batch normalization and ReLU nonlinearities, followed by a linear layer and softmax. For all models,
# the loss function is the cross-entropy error between the predicted and true class.
# Additional hyperparameter details are included in Appendix A.1.
# For N-way, K-shot classification, each gradient is computed using a batch size of NK examples. For Omniglot,
# the 5-way convolutional and non-convolutional MAML
# models were each trained with 1 gradient step with step size
# α = 0.4 and a meta batch-size of 32 tasks. The network
# was evaluated using 3 gradient steps with the same step
# size α = 0.4. The 20-way convolutional MAML model
# was trained and evaluated with 5 gradient steps with step
# size α = 0.1. During training, the meta batch-size was set
# to 16 tasks. For MiniImagenet, both models were trained
# using 5 gradient steps of size α = 0.01, and evaluated using
# 10 gradient steps at test time. Following Ravi & Larochelle
# (2017), 15 examples per class were used for evaluating the
# post-update meta-gradient. We used a meta batch-size of
# 4 and 2 tasks for 1-shot and 5-shot training respectively.
# All models were trained for 60000 iterations on a single
# NVIDIA Pascal Titan X GPU
def omniglot_get_meta_batch(meta_batch_size,n_way,support_k,query_k,is_test=False):
# imort the module the last moment before use, the first import will trigger the load of the whole dataset to memory
import es_maml.omniglot.omniglot_data_singleton
data = es_maml.omniglot.omniglot_data_singleton.dataset
omniglot_shuffled_indicies = es_maml.omniglot.omniglot_data_singleton.omniglot_shuffled_indicies
SHUFFLE_CLASSES = True
AUGMENT_WITH_ROTATION = True
if SHUFFLE_CLASSES is True:
train_indicies = omniglot_shuffled_indicies[:1200]
test_indicies = omniglot_shuffled_indicies[1200:]
else:
train_indicies = list(range(1200))
test_indicies = list(range(1200,data.shape[0]))
class_indicies = train_indicies
if is_test is True:
class_indicies = test_indicies
support_x = []
query_x = []
support_y = []
query_y = []
for meta_batch_i in range(meta_batch_size):
selected_class_indicies = np.random.choice(class_indicies,n_way,replace=False)
task_support_x = []
task_query_x = []
task_support_y = []
task_query_y = []
for class_i_in_batch,class_i in enumerate(selected_class_indicies):
selected_images = np.random.choice(list(range(20)),support_k+query_k,replace=False) # if support_k+query_k = 20, this will be a permutation
class_data = data[class_i,selected_images]
# Each class can be augmented by rotation, we select the rotation after selecting distinct classes
# This means we cannot have a task with the same charater with different rotations, which is what we want
if AUGMENT_WITH_ROTATION is True:
selected_rotation = np.random.choice([0,1,2,3]) # multiples of 90 degree
# np.rot90 cannot handle channels, take the one channel, channel 0, and add it back after rotation
class_data = [np.rot90(class_data[i,0],selected_rotation).reshape(1,28,28) for i in range(len(selected_images))]
class_data = np.stack(class_data) # we are back to the original shape
class_support_x = class_data[:support_k]
class_query_x = class_data[support_k:]
class_support_y = np.repeat(class_i_in_batch,support_k)
class_query_y = np.repeat(class_i_in_batch,query_k)
task_support_x.append(class_support_x)
task_query_x.append(class_query_x)
task_support_y.append(class_support_y)
task_query_y.append(class_query_y)
task_support_x = np.stack(task_support_x)
task_query_x = np.stack(task_query_x)
task_support_y = np.stack(task_support_y)
task_query_y = | np.stack(task_query_y) | numpy.stack |
"""
Module to detect and process lanes
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
class Line:
    """
    Bookkeeping for a single detected lane line across frames.
    """
    def __init__(self):
        # Number of frames this line has been tracked (0 = not yet detected).
        self.iterations = 0
        # Polynomial coefficients averaged over the last n iterations.
        self.best_fit = np.zeros(3, dtype=int)
        # Polynomial coefficients for the most recent fit.
        self.current_fit = np.zeros(3, dtype=int)
        # Most recent curvature estimate.
        self.curvature = 0
class LaneDetector:
"""
Module to detect and process lanes from
binary images
"""
    def __init__(self, nwindows = 10, margin = 100, minpix = 100,
                 recalculate_margin = 150, smoothing_parameter = 10):
        """Configure the lane search.

        Args:
            nwindows: number of sliding windows stacked vertically
            margin: half-width of each search window, in pixels
            minpix: minimum pixels found in a window before re-centering it
            recalculate_margin: search margin around a previous polynomial fit
                (used by _recalculate_lane_pixels)
            smoothing_parameter: number of recent frames used for smoothing
                (TODO confirm — usage is outside this view)
        """
        self.nwindows = nwindows
        self.margin = margin
        self.minpix = minpix
        self.recalculate_margin = recalculate_margin
        self.smoothing_parameter = smoothing_parameter
        # One state tracker per lane line.
        self.left_line = Line()
        self.right_line = Line()
    def pipeline(self, image_file, output_file, output_file_poly):
        """Detect lanes in a binary image file and save two visualisations.

        Args:
            image_file: binary image file to detect lanes on
            output_file: path for the lane image written with OpenCV
            output_file_poly: path for the matplotlib rendering of the fit
        """
        image = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)
        leftx, lefty, rightx, righty = self._calculate_lane_pixels(image)
        output_image, ploty, left_fit, right_fit = self._fit_polynomial(image, leftx, lefty, rightx, righty)
        cv2.imwrite(output_file, output_image)
        plt.imshow(output_image)
        plt.savefig(output_file_poly)
        plt.clf()
        # Force a fresh sliding-window search on the next frame.
        self.left_line.iterations = 0
        self.right_line.iterations = 0
        # NOTE(review): curvature/center are computed but unused here.
        curvature, center = self._calculate_curvature(ploty, left_fit, right_fit, image.shape)
    def pipeline_image(self, image):
        """Detect lanes in one binary frame.

        Reuses the previous fit as a search region when available, falling
        back to a full sliding-window search on the first frame or when the
        sanity check indicates a bad detection.

        Returns:
            tuple: (annotated output image, curvature, center value from
            _calculate_curvature)
        """
        if self.left_line.iterations == 0:
            leftx, lefty, rightx, righty = self._calculate_lane_pixels(image)
        else:
            leftx, lefty, rightx, righty = self._recalculate_lane_pixels(image)
        # Check to see the lanes are correct; if not, restart from scratch.
        if self._sanity_check(leftx, lefty, rightx, righty):
            self.left_line.iterations = 0
            self.right_line.iterations = 0
            leftx, lefty, rightx, righty = self._calculate_lane_pixels(image)
        output_image, ploty, left_fit, right_fit = self._fit_polynomial(image, leftx, lefty, rightx, righty)
        curvature, center = self._calculate_curvature(ploty, left_fit, right_fit, image.shape)
        return output_image, curvature, center
    def _calculate_lane_pixels(self, image):
        """
        Private function to calculate lane pixels with a histogram-seeded
        sliding-window search.

        Returns:
            tuple: (leftx, lefty, rightx, righty) pixel coordinate arrays
        """
        # Reset the line data
        self.left_line = Line()
        self.right_line = Line()
        # Histogram based method to detect lanes: column sums over the
        # bottom 2/5 of the image.
        bottom_half = image[3 * image.shape[0] // 5:, :]
        histogram = np.sum(bottom_half, axis = 0)
        # Define variables for the detection loop; the histogram peaks left
        # and right of center seed the two searches.
        midpoint = np.int32(histogram.shape[0] // 2)
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint
        window_height = np.int32(image.shape[0] // self.nwindows)
        nonzero = image.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        leftx_current = leftx_base
        rightx_current = rightx_base
        left_lane_indices = []
        right_lane_indices = []
        # Loop through the number of windows to detect, bottom to top
        for window in range(self.nwindows):
            # Calculate the boundaries
            win_y_low = image.shape[0] - (window + 1) * window_height
            win_y_high = image.shape[0] - window * window_height
            win_xleft_low = leftx_current - self.margin
            win_xleft_high = leftx_current + self.margin
            win_xright_low = rightx_current - self.margin
            win_xright_high = rightx_current + self.margin
            # Identify the nonzero pixels inside each window
            good_left_indices = ((nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high) \
                & (nonzeroy >= win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
            good_right_indices = ((nonzerox >= win_xright_low) & (nonzerox < win_xright_high) \
                & (nonzeroy >= win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
            left_lane_indices.append(good_left_indices)
            right_lane_indices.append(good_right_indices)
            # Recenter the next window on the mean x of the found pixels
            if len(good_left_indices) > self.minpix:
                leftx_current = np.int32(np.mean(nonzerox[good_left_indices]))
            if len(good_right_indices) > self.minpix:
                rightx_current = np.int32(np.mean(nonzerox[good_right_indices]))
        # Concatenate arrays of indices
        try:
            left_lane_indices = np.concatenate(left_lane_indices)
            right_lane_indices = np.concatenate(right_lane_indices)
        except:
            # NOTE(review): bare except silently keeps the per-window lists
            # if concatenation fails.
            pass
        # Extract left and right lane pixel positions
        leftx = nonzerox[left_lane_indices]
        lefty = nonzeroy[left_lane_indices]
        rightx = nonzerox[right_lane_indices]
        righty = nonzeroy[right_lane_indices]
        return leftx, lefty, rightx, righty
def _recalculate_lane_pixels(self, image):
"""
Private function to recalculate lane pixels using
previously derived polynomials
"""
# Get the non zero pixels
nonzero = image.nonzero()
nonzeroy = | np.array(nonzero[0]) | numpy.array |
from __future__ import division
from pymer4.models import Lmer, Lm
from pymer4.utils import get_resource_path
import pandas as pd
import numpy as np
import os
from scipy.special import logit
def test_gaussian_lm():
df = pd.read_csv(os.path.join(get_resource_path(), 'sample_data.csv'))
model = Lm('DV ~ IV1 + IV3', data=df)
model.fit(summarize=False)
assert model.coefs.shape == (3, 8)
estimates = np.array([42.24840439, 0.24114414, -3.34057784])
assert np.allclose(model.coefs['Estimate'], estimates, atol=.001)
# Test robust SE against statsmodels
standard_se = np.array([6.83783939, 0.30393886, 3.70656475])
assert np.allclose(model.coefs['SE'], standard_se, atol=.001)
hc0_se = | np.array([7.16661817, 0.31713064, 3.81918182]) | numpy.array |
# Author: <NAME>
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize, line_search
def aoki_vectorized(x):
    """
    F(x,y) = 0.5 (x^2 - y)^2 + 0.5 (x-1)^2, evaluated over stacked points:
    x[0] holds all x coordinates, x[1] all y coordinates.
    """
    xs, ys = x[0], x[1]
    return 0.5 * (xs ** 2 - ys) ** 2 + 0.5 * (xs - 1) ** 2
def aoki(x):
    """
    F(x,y) = 0.5 (x^2 - y)^2 + 0.5 (x-1)^2 at a single point x = (x, y).
    """
    residual = x[0] ** 2 - x[1]
    return 0.5 * residual ** 2 + 0.5 * (x[0] - 1) ** 2
def aoki_gd(x):
    """
    Gradient (first derivative) of the aoki function:
    dF/dx = (x^2 - y) * 2x + (x - 1),  dF/dy = -(x^2 - y)
    """
    residual = np.square(x[0]) - x[1]
    g_x = 2 * np.dot(residual, x[0]) + x[0] - 1
    return np.array((g_x, -residual))
def aoki_hess(x):
"""
Second-Order derivative - Hessian Matrix of aoki function(Nabia - 2)
"""
g_xx = 6 * np.square(x[0]) - 2*x[1] + 1
g_xy = -2 * x[0]
g_yy = 1
H = | np.diag((2,2)) | numpy.diag |
import MPImfp
import MPLn23d
import cv2
import sys
import random
import numpy as np
import json
import math as m
from PyQt5 import QtCore, QtGui, QtWidgets
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
"""
Image Scene
"""
class ImageScene(QtWidgets.QGraphicsScene):
    """Graphics scene showing an OpenCV image, with mouse-driven line, rect,
    clear and measure interactions reported through Qt signals.

    Signal coordinates are emitted in image pixels (scene coordinates divided
    by the current scale); measurePixel carries a pixel distance.
    """
    addLine = QtCore.pyqtSignal(float, float, float, float)
    addRect = QtCore.pyqtSignal(float, float, float, float)
    clearRect = QtCore.pyqtSignal(float, float, float, float)
    measurePixel = QtCore.pyqtSignal(float)
    # Mouse interaction modes (see setMouseMode).
    mouseNone = 0
    mouseLine = 1
    mouseRect = 2
    mouseClear = 3
    mouseMeasure = 4

    def __init__(self, *argv, **keywords):
        super(ImageScene, self).__init__(*argv, **keywords)
        self.cvimg = None      # current OpenCV image (2-D gray or 3-D color array)
        self.imageItem = None  # pixmap item displaying cvimg
        self.roiItem = None    # red rectangle marking the ROI
        self.scale = 1.0       # display scale factor
        self.startPos = None   # press position while a drag is in progress
        self.mouseMode = self.mouseNone

    def setImage(self, cvimg):
        """Display cvimg (2-D grayscale or 3-D color numpy array)."""
        self.cvimg = cvimg
        if len(cvimg.shape) == 2:
            height, width = cvimg.shape
            qimg = QtGui.QImage(cvimg.data, width, height, width, QtGui.QImage.Format_Indexed8)
        elif len(cvimg.shape) == 3:
            height, width, dim = cvimg.shape
            qimg = QtGui.QImage(cvimg.data, width, height, dim * width, QtGui.QImage.Format_RGB888)
            # Swap channel order for Qt (OpenCV images are BGR).
            qimg = qimg.rgbSwapped()
        pixmap = QtGui.QPixmap.fromImage(qimg)
        if self.imageItem:
            self.removeItem(self.imageItem)
        self.imageItem = QtWidgets.QGraphicsPixmapItem(pixmap)
        self.addItem(self.imageItem)
        self.__Scale()

    def clearImage(self):
        """Remove the displayed image, if any."""
        self.cvimg = None
        if self.imageItem:
            self.removeItem(self.imageItem)
            self.imageItem = None

    def __Scale(self):
        """Apply self.scale to the image and ROI items and the scene rect."""
        if self.imageItem:
            self.imageItem.setScale(self.scale)
            w = self.scale * self.cvimg.shape[1]
            h = self.scale * self.cvimg.shape[0]
            self.setSceneRect(0, 0, w, h)
        if self.roiItem:
            self.roiItem.setScale(self.scale)

    def setScale(self, scale):
        """Set the display scale and re-apply it to all items."""
        self.scale = scale
        self.__Scale()

    def calcFitScale(self):
        """Return the scale that fits the current image inside the first view
        (1.0 when no image is loaded)."""
        if self.cvimg is not None:
            view = self.views()[0]
            sw = float(view.width()) / float(self.cvimg.shape[1])
            sh = float(view.height()) / float(self.cvimg.shape[0])
            if sw < sh:
                return sw
            else:
                return sh
        else:
            return 1.0

    def drawROI(self, x, y, w, h):
        """Draw a red ROI rectangle (coordinates in image pixels)."""
        if self.roiItem:
            self.removeItem(self.roiItem)
        self.roiItem = QtWidgets.QGraphicsRectItem()
        pen = QtGui.QPen(QtGui.QColor(255,0,0))
        self.roiItem.setPen(pen)
        self.roiItem.setRect(x, y, w, h)
        self.roiItem.setScale(self.scale)
        self.addItem(self.roiItem)

    def clearROI(self):
        """Remove the ROI rectangle, if any."""
        if self.roiItem:
            self.removeItem(self.roiItem)
            self.roiItem = None

    def clearAll(self):
        """Remove both the image and the ROI."""
        self.clearImage()
        self.clearROI()

    def mousePressEvent(self, event):
        """Start a drag: create the rubber-band item for the current mode."""
        if self.imageItem and event.button() == QtCore.Qt.LeftButton:
            self.startPos = event.scenePos()
            if self.mouseMode == self.mouseLine:
                self.line_item = QtWidgets.QGraphicsLineItem()
                pen = QtGui.QPen(QtGui.QColor(0,255,0))
                self.line_item.setPen(pen)
                self.addItem(self.line_item)
            elif self.mouseMode == self.mouseRect:
                self.rect_item = QtWidgets.QGraphicsRectItem()
                pen = QtGui.QPen(QtGui.QColor(0,255,0))
                self.rect_item.setPen(pen)
                self.addItem(self.rect_item)
            elif self.mouseMode == self.mouseClear:
                # Clear mode uses a blue rubber band to distinguish it.
                self.rect_item = QtWidgets.QGraphicsRectItem()
                pen = QtGui.QPen(QtGui.QColor(0,0,255))
                self.rect_item.setPen(pen)
                self.addItem(self.rect_item)
            elif self.mouseMode == self.mouseMeasure:
                self.line_item = QtWidgets.QGraphicsLineItem()
                pen = QtGui.QPen(QtGui.QColor(255,0,0))
                self.line_item.setPen(pen)
                self.addItem(self.line_item)

    def mouseMoveEvent(self, event):
        """Update the rubber-band geometry while dragging."""
        if self.startPos:
            start = self.startPos
            cur = event.scenePos()
            if self.mouseMode == self.mouseLine:
                self.line_item.setLine(start.x(), start.y(), cur.x(), cur.y())
            elif self.mouseMode == self.mouseRect:
                self.rect_item.setRect(start.x(), start.y(), cur.x() - start.x(), cur.y() - start.y())
            elif self.mouseMode == self.mouseClear:
                self.rect_item.setRect(start.x(), start.y(), cur.x() - start.x(), cur.y() - start.y())
            elif self.mouseMode == self.mouseMeasure:
                self.line_item.setLine(start.x(), start.y(), cur.x(), cur.y())

    def mouseReleaseEvent(self, event):
        """Finish the drag: remove the rubber band and emit the mode's signal
        with coordinates converted back to image pixels (divided by scale)."""
        if self.startPos:
            sx = self.startPos.x() / self.scale
            sy = self.startPos.y() / self.scale
            ex = event.scenePos().x() / self.scale
            ey = event.scenePos().y() / self.scale
            if self.mouseMode == self.mouseLine:
                self.removeItem(self.line_item)
                self.addLine.emit(sx, sy, ex, ey)
            elif self.mouseMode == self.mouseRect:
                self.removeItem(self.rect_item)
                self.addRect.emit(sx, sy, ex, ey)
            elif self.mouseMode == self.mouseClear:
                self.removeItem(self.rect_item)
                self.clearRect.emit(sx, sy, ex, ey)
            elif self.mouseMode == self.mouseMeasure:
                self.removeItem(self.line_item)
                dx = ex - sx
                dy = ey - sy
                dis = m.sqrt(dx * dx + dy * dy)
                self.measurePixel.emit(dis)
            self.startPos = None
        super(ImageScene, self).mouseReleaseEvent(event)

    def setMouseMode(self, mode):
        """Select which interaction the next drag performs (mouse* constants)."""
        self.mouseMode = mode
"""
Graphics View
"""
class GraphicsView(QtWidgets.QGraphicsView):
    """Graphics view with background caching and smooth, antialiased rendering."""

    def __init__(self):
        super(GraphicsView, self).__init__()
        self.setCacheMode(QtWidgets.QGraphicsView.CacheBackground)
        self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform | QtGui.QPainter.TextAntialiasing)

    def sizeHint(self):
        # Preferred widget size used by Qt layouts.
        return QtCore.QSize(1024, 768)
"""
File Widget
"""
class FileWidget(QtWidgets.QWidget):
imageChanged = QtCore.pyqtSignal(np.ndarray)
roiChanged = QtCore.pyqtSignal(int, int, int, int)
Units = ['px', 'km', 'm', 'cm', 'mm', 'um', 'nm']
    def __init__(self, parent):
        """Build the file panel: name/size labels, resize and ROI spin boxes,
        and the pixels-per-physical-unit scale controls."""
        super(FileWidget, self).__init__(parent)
        self.org_img = None  # original image; None until a file is loaded
        # Guard flag: handlers should ignore programmatic widget updates made
        # while insitu is False (see clearFile/setInfo).
        self.insitu = True
        vbox = QtWidgets.QVBoxLayout(self)
        self.name_label = QtWidgets.QLabel('Name :')
        vbox.addWidget(self.name_label)
        self.size_label = QtWidgets.QLabel('Size :')
        vbox.addWidget(self.size_label)
        vbox.addWidget(QtWidgets.QLabel('Resize :'))
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        # spin5/spin6: resize width and height.
        self.spin5 = QtWidgets.QSpinBox()
        self.spin5.valueChanged[int].connect(self.changeWidth)
        hbox.addWidget(self.spin5)
        self.spin6 = QtWidgets.QSpinBox()
        self.spin6.valueChanged[int].connect(self.changeHeight)
        hbox.addWidget(self.spin6)
        vbox.addWidget(QtWidgets.QLabel('ROI :'))
        glay = QtWidgets.QGridLayout()
        vbox.addLayout(glay)
        # spin1..spin4: ROI values in the order used by getInfo's FileROI
        # (x-like values in column 0, y-like values in column 1).
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.valueChanged[int].connect(self.changeROI)
        glay.addWidget(self.spin1, 0, 0)
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.valueChanged[int].connect(self.changeROI)
        glay.addWidget(self.spin2, 0, 1)
        self.spin3 = QtWidgets.QSpinBox()
        self.spin3.valueChanged[int].connect(self.changeROI)
        glay.addWidget(self.spin3, 1, 0)
        self.spin4 = QtWidgets.QSpinBox()
        self.spin4.valueChanged[int].connect(self.changeROI)
        glay.addWidget(self.spin4, 1, 1)
        button2 = QtWidgets.QPushButton('Reset ROI')
        button2.clicked[bool].connect(self.resetROI)
        vbox.addWidget(button2)
        vbox.addWidget(QtWidgets.QLabel('Pixels in Scale :'))
        hbox0 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox0)
        # line1: pixel length corresponding to the physical scale below.
        self.line1 = QtWidgets.QLineEdit()
        self.line1.setValidator(QtGui.QDoubleValidator())
        self.line1.setText('1.0')
        hbox0.addWidget(self.line1)
        self.button3 = QtWidgets.QPushButton('Measure')
        self.button3.setCheckable(True)
        hbox0.addWidget(self.button3)
        vbox.addWidget(QtWidgets.QLabel('Scale :'))
        hbox1 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox1)
        # line2 + combo: physical length value and its unit (Units list).
        self.line2 = QtWidgets.QLineEdit()
        self.line2.setValidator(QtGui.QDoubleValidator())
        self.line2.setText('1.0')
        hbox1.addWidget(self.line2)
        self.combo = QtWidgets.QComboBox()
        for unit in self.Units:
            self.combo.addItem(unit)
        hbox1.addWidget(self.combo)
        self.check1 = QtWidgets.QCheckBox('Measure')
        self.check1.setChecked(True)
        vbox.addWidget(self.check1)
        vbox.addStretch()
def clearFile(self):
    """Reset every file-related control to its pristine state.

    The insitu flag is lowered while the widgets are touched so their
    change signals do not trigger downstream processing.
    """
    self.insitu = False
    self.org_img = None
    self.name_label.setText('Name :')
    self.size_label.setText('Size :')
    for spin in (self.spin1, self.spin2, self.spin3, self.spin4):
        spin.setValue(0)
    for line in (self.line1, self.line2):
        line.setText('1.0')
    self.combo.setCurrentIndex(0)
    self.check1.setChecked(True)
    self.insitu = True
def setInfo(self, info):
    """Load the file-related widget state from an *info* dict.

    The insitu flag is lowered so the programmatic spin-box updates below
    do not fire the ROI/resize change handlers.
    """
    self.insitu = False
    roi = info['FileROI']
    size = info['FileSize']          # original image size [w, h]
    resize = info['FileResize']      # displayed (resized) size [w, h]
    fname = info['FileName']
    filename = QtCore.QFileInfo(fname).fileName()
    self.name_label.setText('Name : ' + filename)
    self.size_label.setText('Size : ' + str(size[0]) + ' x ' + str(size[1]))
    # ROI corners (x0, y0, x1, y1), bounded by the resized image extent
    self.spin1.setMinimum(0)
    self.spin1.setMaximum(resize[0])
    self.spin1.setValue(roi[0])
    self.spin2.setMinimum(0)
    self.spin2.setMaximum(resize[1])
    self.spin2.setValue(roi[1])
    self.spin3.setMinimum(0)
    self.spin3.setMaximum(resize[0])
    self.spin3.setValue(roi[2])
    self.spin4.setMinimum(0)
    self.spin4.setMaximum(resize[1])
    self.spin4.setValue(roi[3])
    # resize spin boxes allow up to 10x the original dimensions
    self.spin5.setMinimum(1)
    self.spin5.setMaximum(10 * size[0])
    self.spin5.setValue(resize[0])
    self.spin6.setMinimum(1)
    self.spin6.setMaximum(10 * size[1])
    self.spin6.setValue(resize[1])
    self.insitu = True
    # scale/unit fields are set after insitu is restored (original order kept)
    self.line1.setText(str(info['FilePixels']))
    self.line2.setText(str(info['FileScale']))
    self.combo.setCurrentIndex(info['FileUnit'])
    self.check1.setChecked(info['FileMeasure'])
def getInfo(self, fname=None):
    """Collect the file-related settings into an info dict.

    When *fname* is None the dict reflects the widget state for the
    currently loaded image; otherwise the geometry entries describe the
    image stored at *fname* at full size with a full-image ROI.
    """
    info = {}
    if fname is None:  # identity test instead of '== None' (PEP 8)
        if self.org_img is not None:
            height, width, dim = self.org_img.shape
            info['FileSize'] = [width, height]
        else:
            info['FileSize'] = [0, 0]
        info['FileResize'] = [self.spin5.value(), self.spin6.value()]
        info['FileROI'] = [self.spin1.value(), self.spin2.value(), self.spin3.value(), self.spin4.value()]
    else:
        # NOTE(review): cv2.imread returns None for unreadable paths, which
        # would raise on .shape below -- assumed to be checked by the caller.
        img = cv2.imread(str(fname), 1)
        height, width, dim = img.shape
        info['FileSize'] = [width, height]
        info['FileResize'] = [width, height]
        info['FileROI'] = [0, 0, width, height]
    info['FilePixels'] = float(self.line1.text())
    info['FileScale'] = float(self.line2.text())
    info['FileUnit'] = self.combo.currentIndex()
    info['FileMeasure'] = self.check1.isChecked()
    return info
def changeROI(self):
    """Broadcast the current ROI spin-box rectangle as (x, y, w, h)."""
    if not self.insitu:
        return
    sx, sy = self.spin1.value(), self.spin2.value()
    ex, ey = self.spin3.value(), self.spin4.value()
    self.roiChanged.emit(sx, sy, ex - sx, ey - sy)
def resetROI(self):
    """Restore the ROI spin boxes to the full image and broadcast it."""
    self.initROI()
    self.changeROI()
def initROI(self):
    """Set the ROI spin boxes to cover the whole resized image."""
    width = self.spin5.value()
    height = self.spin6.value()
    self.insitu = False  # suppress change handlers while updating
    for spin, limit, value in ((self.spin1, width, 0),
                               (self.spin2, height, 0),
                               (self.spin3, width, width),
                               (self.spin4, height, height)):
        spin.setMaximum(limit)
        spin.setValue(value)
    self.insitu = True
def changeWidth(self):
    """Keep the aspect ratio when the resize-width spin box changes.

    The matching height is computed with floor division because
    QSpinBox.setValue() requires an int: under Python 3 the original
    true division produced a float and raised a TypeError.
    """
    if self.insitu:
        height, width, dim = self.org_img.shape
        self.insitu = False
        self.spin6.setValue(self.spin5.value() * height // width)
        self.insitu = True
        self.initROI()
        self.setImage()
def changeHeight(self):
    """Keep the aspect ratio when the resize-height spin box changes.

    The matching width is computed with floor division because
    QSpinBox.setValue() requires an int: under Python 3 the original
    true division produced a float and raised a TypeError.
    """
    if self.insitu:
        height, width, dim = self.org_img.shape
        self.insitu = False
        self.spin5.setValue(self.spin6.value() * width // height)
        self.insitu = True
        self.initROI()
        self.setImage()
def setImage(self):
    """Emit the (possibly resized) image followed by the current ROI."""
    if self.org_img is None:
        return
    height, width, dim = self.org_img.shape
    new_w, new_h = self.spin5.value(), self.spin6.value()
    if (new_w, new_h) == (width, height):
        self.imageChanged.emit(self.org_img)
    else:
        self.imageChanged.emit(cv2.resize(self.org_img, (new_w, new_h)))
    sx, sy = self.spin1.value(), self.spin2.value()
    ex, ey = self.spin3.value(), self.spin4.value()
    self.roiChanged.emit(sx, sy, ex - sx, ey - sy)
def measurePixel(self, dis):
    """Convert a distance measured on the displayed image back to
    original-image pixels and show it in the pixels line edit."""
    if self.button3.isChecked():
        scale = self.org_img.shape[1] / self.spin5.value()
        self.line1.setText(str(dis * scale))
@staticmethod
def Process(src_img, info):
if src_img is not None:
height, width, dim = src_img.shape
roi = info['FileROI']
resize = info['FileResize']
if resize[0] == width and resize[1] == height:
return src_img[roi[1]:roi[3], roi[0]:roi[2]]
else:
res_img = cv2.resize(src_img, (resize[0], resize[1]))
return res_img[roi[1]:roi[3], roi[0]:roi[2]]
else:
return None
@staticmethod
def PixelSize(info):
    """Return [physical size of one displayed pixel, unit string].

    The scale is divided by the calibration pixel count and corrected by
    the width ratio between the original and the resized image.
    """
    scale = info['FileScale']
    pixels = info['FilePixels']
    width_ratio = info['FileSize'][0] / info['FileResize'][0]
    ps = (scale / pixels) * width_ratio
    return [ps, FileWidget.Units[info['FileUnit']]]
"""
Contrast Widget
"""
class ContrastWidget(QtWidgets.QWidget):
    """Panel for contrast adjustment: optional mean-filter background
    subtraction followed by a linear LUT stretch between a minimum and a
    maximum level.  Emits imageChanged with the processed image and shows
    the before/after histograms on an embedded matplotlib canvas."""
    imageChanged = QtCore.pyqtSignal(np.ndarray)
    def __init__(self, parent):
        super(ContrastWidget, self).__init__(parent)
        self.src_img = None   # input image provided by the owner of this widget
        self.insitu = True    # False while controls are updated programmatically
        vbox = QtWidgets.QVBoxLayout(self)
        # histogram display (source on top, result below)
        self.figure = Figure(figsize=(1,2))
        self.figure.subplots_adjust(bottom=0.1, top=0.98, left=0.02, right=0.98, hspace=0.02)
        self.canvas = FigureCanvas(self.figure)
        vbox.addWidget(self.canvas)
        self.check1 = QtWidgets.QCheckBox('Background Subtraction')
        self.check1.stateChanged[int].connect(self.setImage)
        vbox.addWidget(self.check1)
        vbox.addWidget(QtWidgets.QLabel('Filter Size :'))
        # mean-filter kernel size used to estimate the background
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(1)
        self.spin1.setMaximum(1999)
        self.spin1.setSingleStep(2)
        self.spin1.setValue(255)
        self.spin1.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin1)
        vbox.addWidget(QtWidgets.QLabel('LUT Min. and Max. :'))
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(0)
        self.spin2.setMaximum(255)
        self.spin2.setValue(0)
        self.spin2.valueChanged[int].connect(self.setImage)
        hbox.addWidget(self.spin2)
        self.spin3 = QtWidgets.QSpinBox()
        self.spin3.setMinimum(0)
        self.spin3.setMaximum(255)
        self.spin3.setValue(255)
        self.spin3.valueChanged[int].connect(self.setImage)
        hbox.addWidget(self.spin3)
        vbox.addStretch()
    def setInfo(self, info):
        """Load control state from an info dict without re-processing."""
        self.insitu = False
        self.check1.setChecked(info['ContBGSubtract'])
        self.spin1.setValue(info['ContFilterSize'])
        self.spin2.setValue(info['ContLUTMin'])
        self.spin3.setValue(info['ContLUTMax'])
        self.insitu = True
    def getInfo(self):
        """Return the control state as an info dict (see Process)."""
        info = {}
        info['ContBGSubtract'] = self.check1.isChecked()
        info['ContFilterSize'] = self.spin1.value()
        info['ContLUTMin'] = self.spin2.value()
        info['ContLUTMax'] = self.spin3.value()
        return info
    def setImage(self):
        """Process the source image, update the histograms and broadcast."""
        if self.insitu:
            dst_img = self.Process(self.src_img, self.getInfo())
            if dst_img is not None:
                self.drawGraph(self.src_img, dst_img)
                self.imageChanged.emit(dst_img)
    def drawHist(self, img, ax):
        """Plot the 256-level histogram of *img* (per channel if color)."""
        lv = range(256)
        if len(img.shape) == 2:
            hist = cv2.calcHist([img], [0], None, [256], [0,256])
            ax.plot(lv, hist, 'k')
        elif len(img.shape) == 3:
            col = ['b', 'g', 'r']
            for i in range(3):
                hist = cv2.calcHist([img], [i], None, [256], [0,256])
                ax.plot(lv, hist, col[i])
        ax.set_xlim(0, 255)
    def drawGraph(self, src_img, dst_img):
        """Redraw the source (top) and result (bottom) histograms."""
        self.figure.clf()
        ax1 = self.figure.add_subplot(2,1,1)
        ax1.xaxis.set_ticklabels([])
        ax1.yaxis.set_ticklabels([])
        self.drawHist(src_img, ax1)
        ax2 = self.figure.add_subplot(2,1,2)
        ax2.yaxis.set_ticklabels([])
        self.drawHist(dst_img, ax2)
        self.canvas.draw()
    @staticmethod
    def Process(src_img, info):
        """Return the contrast-adjusted image (None passthrough).

        Background subtraction computes src - blur(src) + 127; the LUT
        then maps [lutmin, lutmax] linearly onto [0, 255] with clamping.
        """
        if src_img is None:
            return None
        if info['ContBGSubtract']:
            fsize = info['ContFilterSize']
            # int32 intermediates keep negative differences exact; note the
            # final uint8 cast wraps values outside [0, 255] modulo 256
            fore = src_img.astype(np.int32)
            back = cv2.blur(src_img, (fsize, fsize)).astype(np.int32)
            sub = fore - back + 127
            img = sub.astype(np.uint8)
        else:
            img = src_img
        lutmin = info['ContLUTMin']
        lutmax = info['ContLUTMax']
        diff = lutmax - lutmin
        lut = np.empty((256, 1), dtype='uint8')
        for i in range(256):
            if i <= lutmin:
                lut[i][0] = 0
            elif i >= lutmax:
                lut[i][0] = 255
            else:
                # linear ramp; float result is truncated by the uint8 store
                lut[i][0] = 255 * (i - lutmin) / diff
        return cv2.LUT(img, lut)
"""
Filter Widget
"""
class FilterWidget(QtWidgets.QWidget):
    """Panel applying a smoothing filter (none/blur/Gaussian/median/bilateral).

    Emits imageChanged with the filtered image whenever a control changes.
    """
    imageChanged = QtCore.pyqtSignal(np.ndarray)
    def __init__(self, parent):
        super(FilterWidget, self).__init__(parent)
        self.src_img = None   # input image provided by the owner
        self.insitu = True    # False while controls are updated programmatically
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(QtWidgets.QLabel('Type :'))
        self.combo1 = QtWidgets.QComboBox()
        self.combo1.addItem('None')
        self.combo1.addItem('Blur')
        self.combo1.addItem('Gaussian')
        self.combo1.addItem('Median')
        self.combo1.addItem('Bilateral')
        self.combo1.currentIndexChanged[int].connect(self.filterChanged)
        vbox.addWidget(self.combo1)
        vbox.addWidget(QtWidgets.QLabel('Size :'))
        # kernel size; step of 2 keeps it odd as Gaussian/median require
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(1)
        self.spin1.setMaximum(99)
        self.spin1.setValue(1)
        self.spin1.setSingleStep(2)
        self.spin1.setEnabled(False)
        self.spin1.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin1)
        vbox.addWidget(QtWidgets.QLabel('Sigma0 :'))
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(0)
        self.spin2.setMaximum(300)
        self.spin2.setValue(0)
        self.spin2.setEnabled(False)
        self.spin2.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin2)
        vbox.addWidget(QtWidgets.QLabel('Sigma1 :'))
        self.spin3 = QtWidgets.QSpinBox()
        self.spin3.setMinimum(0)
        self.spin3.setMaximum(300)
        self.spin3.setValue(0)
        self.spin3.setEnabled(False)
        self.spin3.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin3)
        vbox.addStretch()
    def setInfo(self, info):
        """Load control state from an info dict without re-processing."""
        self.insitu = False
        self.combo1.setCurrentIndex(info['FilterType'])
        self.spin1.setValue(info['FilterSize'])
        self.spin2.setValue(info['FilterSigma0'])
        self.spin3.setValue(info['FilterSigma1'])
        self.insitu = True
    def getInfo(self):
        """Return the control state as an info dict (see Process)."""
        info = {}
        info['FilterType'] = self.combo1.currentIndex()
        info['FilterSize'] = self.spin1.value()
        info['FilterSigma0'] = self.spin2.value()
        info['FilterSigma1'] = self.spin3.value()
        return info
    def filterChanged(self):
        """Enable only the parameters relevant to the selected filter."""
        ftype = self.combo1.currentIndex()
        # [size, sigma0, sigma1] relevance per filter type
        flag = [[False, False, False], [True, False, False],\
                [True, True, True], [True, False, False], [True, True, True]]
        self.spin1.setEnabled(flag[ftype][0])
        self.spin2.setEnabled(flag[ftype][1])
        self.spin3.setEnabled(flag[ftype][2])
        self.setImage()
    def setImage(self):
        """Filter the current source image and broadcast the result."""
        if self.insitu:
            dst_img = self.Process(self.src_img, self.getInfo())
            if dst_img is not None:
                self.imageChanged.emit(dst_img)
    @staticmethod
    def Process(src_img, info):
        """Return the image filtered as described by *info* (None passthrough)."""
        if src_img is None:
            return None
        ftype = info['FilterType']
        size = info['FilterSize']
        sigma0 = float(info['FilterSigma0'])
        sigma1 = float(info['FilterSigma1'])
        if ftype == 1:
            return cv2.blur(src_img, (size, size))
        elif ftype == 2:
            # Bug fix: the 4th positional argument of cv2.GaussianBlur is
            # dst, not sigmaY -- sigmaY must be passed by keyword.
            return cv2.GaussianBlur(src_img, (size, size), sigma0, sigmaY=sigma1)
        elif ftype == 3:
            return cv2.medianBlur(src_img, size)
        elif ftype == 4:
            return cv2.bilateralFilter(src_img, size, sigma0, sigma1)
        else:
            return src_img.copy()
"""
Threshold Widget
"""
class ThresholdWidget(QtWidgets.QWidget):
    """Panel binarising the image with simple/Otsu/adaptive thresholding.

    Emits imageChanged with the binary result whenever a control changes.
    """
    imageChanged = QtCore.pyqtSignal(np.ndarray)
    def __init__(self, parent):
        super(ThresholdWidget, self).__init__(parent)
        self.src_img = None   # input image provided by the owner
        self.dst_img = None   # NOTE(review): never written in this class; appears unused
        self.insitu = True    # False while controls are updated programmatically
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(QtWidgets.QLabel('Type :'))
        self.combo1 = QtWidgets.QComboBox()
        self.combo1.addItem('Simple')
        self.combo1.addItem('Otsu')
        self.combo1.addItem('Adaptive Mean')
        self.combo1.addItem('Adaptive Gauss')
        self.combo1.currentIndexChanged[int].connect(self.methodChanged)
        vbox.addWidget(self.combo1)
        vbox.addWidget(QtWidgets.QLabel('Threshold :'))
        # fixed threshold level (Simple mode only)
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(0)
        self.spin1.setMaximum(255)
        self.spin1.setValue(127)
        self.spin1.setEnabled(True)
        self.spin1.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin1)
        vbox.addWidget(QtWidgets.QLabel('Adaptive Block Size :'))
        # neighbourhood size for the adaptive methods (kept odd by the step)
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(3)
        self.spin2.setMaximum(999)
        self.spin2.setValue(123)
        self.spin2.setSingleStep(2)
        self.spin2.setEnabled(False)
        self.spin2.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin2)
        vbox.addWidget(QtWidgets.QLabel('Adaptive Parm :'))
        # constant subtracted from the local (weighted) mean
        self.spin3 = QtWidgets.QSpinBox()
        self.spin3.setMinimum(-128)
        self.spin3.setMaximum(128)
        self.spin3.setValue(0)
        self.spin3.setEnabled(False)
        self.spin3.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin3)
        self.check1 = QtWidgets.QCheckBox('Invert')
        self.check1.stateChanged[int].connect(self.setImage)
        vbox.addWidget(self.check1)
        vbox.addStretch()
    def setInfo(self, info):
        """Load control state from an info dict without re-processing."""
        self.insitu = False
        self.combo1.setCurrentIndex(info['ThreshType'])
        self.spin1.setValue(info['ThreshThreshold'])
        self.spin2.setValue(info['ThreshAdaptiveBlockSize'])
        self.spin3.setValue(info['ThreshAdaptiveParm'])
        self.check1.setChecked(info['ThreshInvert'])
        self.insitu = True
    def getInfo(self):
        """Return the control state as an info dict (see Process)."""
        info = {}
        info['ThreshType'] = self.combo1.currentIndex()
        info['ThreshThreshold'] = self.spin1.value()
        info['ThreshAdaptiveBlockSize'] = self.spin2.value()
        info['ThreshAdaptiveParm'] = self.spin3.value()
        info['ThreshInvert'] = self.check1.isChecked()
        return info
    def methodChanged(self):
        """Enable only the parameters used by the selected method."""
        ttype = self.combo1.currentIndex()
        # [threshold, block size, parm] relevance per method
        flag = [[True, False, False], [False, False, False],\
                [False, True, True], [False, True, True]]
        self.spin1.setEnabled(flag[ttype][0])
        self.spin2.setEnabled(flag[ttype][1])
        self.spin3.setEnabled(flag[ttype][2])
        self.setImage()
    def setImage(self):
        """Threshold the current source image and broadcast the result."""
        if self.insitu:
            dst_img = self.Process(self.src_img, self.getInfo())
            if dst_img is not None:
                self.imageChanged.emit(dst_img)
    @staticmethod
    def Process(src_img, info):
        """Return the binarised image described by *info* (None passthrough)."""
        if src_img is None:
            return None
        if len(src_img.shape) == 3:
            # NOTE(review): cv2.imread yields BGR frames, so COLOR_RGB2GRAY
            # swaps the red/blue weights here -- confirm this is intended.
            src_img = cv2.cvtColor(src_img, cv2.COLOR_RGB2GRAY)
        ttype = info['ThreshType']
        thres = info['ThreshThreshold']
        bsize = info['ThreshAdaptiveBlockSize']
        parm = info['ThreshAdaptiveParm']
        inv = info['ThreshInvert']
        if inv:
            sty = cv2.THRESH_BINARY_INV
        else:
            sty = cv2.THRESH_BINARY
        if ttype == 0:
            ret, dst_img = cv2.threshold(src_img, thres, 255, sty)
        elif ttype == 1:
            # Otsu ignores the fixed threshold and picks one automatically
            ret, dst_img = cv2.threshold(src_img, 0, 255, sty + cv2.THRESH_OTSU)
        elif ttype == 2:
            dst_img = cv2.adaptiveThreshold(src_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, sty, bsize, parm)
        elif ttype == 3:
            dst_img = cv2.adaptiveThreshold(src_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, sty, bsize, parm)
        return dst_img
"""
Morphology Widget
"""
class MorphologyWidget(QtWidgets.QWidget):
    """Panel applying morphological opening/closing to the binary image and
    reporting the resulting black/white area fractions."""
    imageChanged = QtCore.pyqtSignal(np.ndarray)
    def __init__(self, parent):
        super(MorphologyWidget, self).__init__(parent)
        self.src_img = None   # binary input image provided by the owner
        self.insitu = True    # False while controls are updated programmatically
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(QtWidgets.QLabel('Type :'))
        self.combo1 = QtWidgets.QComboBox()
        self.combo1.addItem('Opening')
        self.combo1.addItem('Closing')
        self.combo1.currentIndexChanged[int].connect(self.setImage)
        vbox.addWidget(self.combo1)
        vbox.addWidget(QtWidgets.QLabel('Iterations :'))
        # 0 iterations lets the image pass through unchanged (see Process)
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(0)
        self.spin1.setMaximum(32)
        self.spin1.setValue(0)
        self.spin1.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin1)
        vbox.addStretch()
        # area-fraction read-outs, refreshed in setImage
        self.label1 = QtWidgets.QLabel('Black :')
        vbox.addWidget(self.label1)
        self.label2 = QtWidgets.QLabel('White :')
        vbox.addWidget(self.label2)
    def setInfo(self, info):
        """Load control state from an info dict without re-processing."""
        self.insitu = False
        self.combo1.setCurrentIndex(info['MorpholType'])
        self.spin1.setValue(info['MorpholIterations'])
        self.insitu = True
    def getInfo(self):
        """Return the control state as an info dict (see Process)."""
        info = {}
        info['MorpholType'] = self.combo1.currentIndex()
        info['MorpholIterations'] = self.spin1.value()
        return info
    def setImage(self):
        """Process the image, broadcast it and refresh the area fractions."""
        if self.insitu:
            dst_img = self.Process(self.src_img, self.getInfo())
            if dst_img is not None:
                self.imageChanged.emit(dst_img)
                # binary image: only bins 0 and 255 are populated
                hist = cv2.calcHist([dst_img], [0], None, [256], [0,256])
                tot = hist[0][0] + hist[255][0]
                self.label1.setText('Black : ' + str(100.0 * hist[0][0] / tot) + ' %')
                self.label2.setText('White : ' + str(100.0 * hist[255][0] / tot) + ' %')
    def clearLabel(self):
        """Reset the area-fraction labels."""
        self.label1.setText('Black :')
        self.label2.setText('White :')
    @staticmethod
    def Process(src_img, info):
        """Apply the selected morphology with a 3x3 kernel.

        With 0 iterations the source image is returned unchanged, which
        keeps the downstream pipeline (BinaryImage) supplied with a valid
        image; None is passed through for a missing image.
        """
        if src_img is None:
            return None
        mtype = info['MorpholType']
        it = info['MorpholIterations']
        kernel = np.ones((3,3), np.uint8)
        if it > 0:
            if mtype == 0:
                return cv2.morphologyEx(src_img, cv2.MORPH_OPEN, kernel, iterations=it)
            elif mtype == 1:
                return cv2.morphologyEx(src_img, cv2.MORPH_CLOSE, kernel, iterations=it)
        else:
            return src_img
"""
Modify Widget
"""
class ModifyWidget(QtWidgets.QWidget):
    """Panel for manual touch-up of the binary image plus overlay drawing.

    Users paint black/white lines and rectangles (kept in mod_objects as
    [shape, sx, sy, ex, ey, color, thickness]); the widget renders contour,
    centre and bounding-box overlays on a dimmed copy of the original image.
    """
    imageChanged = QtCore.pyqtSignal(np.ndarray)
    mouseModeChanged = QtCore.pyqtSignal(int)
    def __init__(self, parent):
        super(ModifyWidget, self).__init__(parent)
        self.src_img = None       # binary image from the morphology stage
        self.org_img = None       # original (color) image used as backdrop
        self.mod_objects = []     # manual modifications (see class docstring)
        self.insitu = True
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(QtWidgets.QLabel('Source weight :'))
        # brightness of the original image behind the overlays (255 = full)
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(0)
        self.spin1.setMaximum(255)
        self.spin1.setValue(255)
        self.spin1.valueChanged[int].connect(self.setImage)
        vbox.addWidget(self.spin1)
        self.check1 = QtWidgets.QCheckBox('Contour draw')
        self.check1.setChecked(True)
        self.check1.stateChanged[int].connect(self.setImage)
        vbox.addWidget(self.check1)
        self.check2 = QtWidgets.QCheckBox('Center draw')
        self.check2.setChecked(True)
        self.check2.stateChanged[int].connect(self.setImage)
        vbox.addWidget(self.check2)
        self.check4 = QtWidgets.QCheckBox('BoundRect draw')
        self.check4.setChecked(True)
        self.check4.stateChanged[int].connect(self.setImage)
        vbox.addWidget(self.check4)
        self.check3 = QtWidgets.QCheckBox('Modify draw')
        self.check3.setChecked(True)
        self.check3.stateChanged[int].connect(self.setImage)
        vbox.addWidget(self.check3)
        vbox.addWidget(QtWidgets.QLabel('Modify color :'))
        hbox1 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox1)
        # group1: paint color (id 0 = black, id 1 = white)
        self.group1 = QtWidgets.QButtonGroup()
        button1 = QtWidgets.QPushButton('Black')
        button1.setCheckable(True)
        button1.setChecked(True)
        self.group1.addButton(button1, 0)
        hbox1.addWidget(button1)
        button2 = QtWidgets.QPushButton('White')
        button2.setCheckable(True)
        self.group1.addButton(button2, 1)
        hbox1.addWidget(button2)
        vbox.addWidget(QtWidgets.QLabel('Modify mode :'))
        hbox2 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox2)
        # group2: mouse mode (id 0 = line, 1 = rect, 2 = clear)
        self.group2 = QtWidgets.QButtonGroup()
        self.group2.buttonClicked.connect(self.setMouseMode)
        button3 = QtWidgets.QPushButton('Line')
        button3.setCheckable(True)
        button3.setChecked(True)
        self.group2.addButton(button3, 0)
        hbox2.addWidget(button3)
        button4 = QtWidgets.QPushButton('Rect')
        button4.setCheckable(True)
        self.group2.addButton(button4, 1)
        hbox2.addWidget(button4)
        button5 = QtWidgets.QPushButton('Clear')
        button5.setCheckable(True)
        self.group2.addButton(button5, 2)
        hbox2.addWidget(button5)
        vbox.addWidget(QtWidgets.QLabel('Line width :'))
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(1)
        self.spin2.setMaximum(16)
        self.spin2.setValue(1)
        vbox.addWidget(self.spin2)
        vbox.addStretch()
    def addLine(self, sx, sy, ex, ey):
        """Record a painted line in the selected color and redraw."""
        thickness = self.spin2.value()
        if self.group1.checkedId() == 0:
            self.mod_objects.append([0, int(sx), int(sy), int(ex), int(ey), 0, thickness])
        elif self.group1.checkedId() == 1:
            self.mod_objects.append([0, int(sx), int(sy), int(ex), int(ey), 255, thickness])
        self.setImage()
    def addRect(self, sx, sy, ex, ey):
        """Record a filled rectangle in the selected color and redraw."""
        if self.group1.checkedId() == 0:
            self.mod_objects.append([1, int(sx), int(sy), int(ex), int(ey), 0, -1])
        elif self.group1.checkedId() == 1:
            self.mod_objects.append([1, int(sx), int(sy), int(ex), int(ey), 255, -1])
        self.setImage()
    def clearRect(self, sx, sy, ex, ey):
        """Delete every modification lying fully inside the given rectangle."""
        ids = []
        for i in range(len(self.mod_objects)):
            obj = self.mod_objects[i]
            if obj[1] > sx and obj[2] > sy and obj[3] < ex and obj[4] < ey:
                ids.append(i)
        for i in ids[::-1]:  # pop from the back so indices stay valid
            self.mod_objects.pop(i)
        self.setImage()
    def setMouseMode(self):
        """Tell the image scene which mouse interaction is active."""
        if self.insitu:
            if self.group2.checkedId() == 0:
                self.mouseModeChanged.emit(ImageScene.mouseLine)
            elif self.group2.checkedId() == 1:
                self.mouseModeChanged.emit(ImageScene.mouseRect)
            elif self.group2.checkedId() == 2:
                self.mouseModeChanged.emit(ImageScene.mouseClear)
    def setInfo(self, info):
        """Load control state and modifications from an info dict."""
        self.insitu = False
        self.spin1.setValue(info['ModSourceWeight'])
        self.check1.setChecked(info['ModContourDraw'])
        self.check2.setChecked(info['ModCenterDraw'])
        self.check3.setChecked(info['ModModifyDraw'])
        self.check4.setChecked(info['ModBoundRectDraw'])
        self.group1.button(info['ModModifyColor']).setChecked(True)
        self.group2.button(info['ModModifyMode']).setChecked(True)
        self.spin2.setValue(info['ModLineWidth'])
        self.mod_objects = info['ModModifyObjects']
        self.insitu = True
    def getInfo(self, with_mod=True):
        """Return the control state; modifications included when *with_mod*."""
        info = {}
        info['ModSourceWeight'] = self.spin1.value()
        info['ModContourDraw'] = self.check1.isChecked()
        info['ModCenterDraw'] = self.check2.isChecked()
        info['ModModifyDraw'] = self.check3.isChecked()
        info['ModBoundRectDraw'] = self.check4.isChecked()
        info['ModModifyColor'] = self.group1.checkedId()
        info['ModModifyMode'] = self.group2.checkedId()
        info['ModLineWidth'] = self.spin2.value()
        if with_mod:
            info['ModModifyObjects'] = self.mod_objects
        else:
            info['ModModifyObjects'] = []
        return info
    def setImage(self):
        """Render the overlay image and broadcast it."""
        if self.insitu:
            dst_img = self.Process(self.src_img, self.org_img, self.getInfo())
            if dst_img is not None:
                self.imageChanged.emit(dst_img)
    @staticmethod
    def ProcessMod(src_img, info):
        """Return a copy of the binary image with the manual edits burnt in."""
        if src_img is None:
            return None
        mod_img = src_img.copy()
        for obj in info['ModModifyObjects']:
            if obj[0] == 0:
                cv2.line(mod_img, (obj[1], obj[2]), (obj[3], obj[4]), (obj[5], obj[5], obj[5]), thickness=obj[6])
            elif obj[0] == 1:
                cv2.rectangle(mod_img, (obj[1], obj[2]), (obj[3], obj[4]), (obj[5], obj[5], obj[5]), thickness=obj[6])
        return mod_img
    @staticmethod
    def Process(src_img, org_img, info):
        """Render the dimmed original image plus the requested overlays
        (modifications, particle centres, bounding boxes, contours)."""
        if src_img is None:
            return None
        mod_img = ModifyWidget.ProcessMod(src_img, info)
        # NOTE(review): 2-tuple unpacking assumes the OpenCV 4.x findContours API
        conts, hier = cv2.findContours(mod_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        draw_img = np.zeros(org_img.shape, dtype=np.uint8)
        subw = 255 - info['ModSourceWeight']
        cv2.subtract(org_img, (subw, subw, subw, subw), draw_img)
        if info['ModModifyDraw']:
            for obj in info['ModModifyObjects']:
                if obj[0] == 0:
                    cv2.line(draw_img, (obj[1], obj[2]), (obj[3], obj[4]), (obj[5], obj[5], obj[5]), thickness=obj[6])
                elif obj[0] == 1:
                    cv2.rectangle(draw_img, (obj[1], obj[2]), (obj[3], obj[4]), (obj[5], obj[5], obj[5]), thickness=obj[6])
        if info['ModCenterDraw']:
            for cont in conts:
                mom = cv2.moments(cont)
                if mom['m00'] != 0:
                    x = int(mom['m10'] / mom['m00'])
                    y = int(mom['m01'] / mom['m00'])
                    cv2.line(draw_img, (x, y - 3), (x, y + 3), (0, 0, 255))
                    cv2.line(draw_img, (x - 3, y), (x + 3, y), (0, 0, 255))
        if info['ModBoundRectDraw']:
            for cont in conts:
                rect = cv2.minAreaRect(cont)
                box = cv2.boxPoints(rect)
                # Bug fix: np.int0 was removed in NumPy 2.0; cast explicitly.
                box = box.astype(np.intp)
                cv2.drawContours(draw_img, [box], 0, (255, 0, 0), 1)
        if info['ModContourDraw']:
            cv2.drawContours(draw_img, conts, -1, (255,255,0), 1)
        return draw_img
"""
Misc Functions
"""
def PlotUnitText(unit):
    """Return a matplotlib-ready axis label for *unit*.

    'um' is rendered as a TeX micro sign; other units pass through.
    """
    if unit == 'um':
        # raw string: '\m' would be an invalid escape sequence otherwise
        return r'$\mu$m'
    return unit
def BinaryImage(info):
    """Run the full preprocessing pipeline described by *info* and return
    the final modified binary image."""
    img = cv2.imread(info['FileName'], 1)
    img = FileWidget.Process(img, info)
    img = ContrastWidget.Process(img, info)
    img = FilterWidget.Process(img, info)
    img = ThresholdWidget.Process(img, info)
    img = MorphologyWidget.Process(img, info)
    return ModifyWidget.ProcessMod(img, info)
"""
Measure IMFP Thread
"""
class MeasureIMFPThread(QtCore.QThread):
    """Background thread measuring intercept-length (IMFP) histograms.

    For every measurable image the binary image is rebuilt and MPImfp
    accumulates single- (mode 0) and double-length (mode 1) frequencies
    into self.freq, with lengths scaled so that all images share the
    coarsest pixel size (self.psmax).
    """
    # per-image progress: (percent increment, file name, [pixel size, unit])
    Progress = QtCore.pyqtSignal(int, str, list)
    def __init__(self, parent=None):
        super(MeasureIMFPThread, self).__init__(parent)
        self.barrier = 255        # pixel value treated as the barrier phase
        self.dpix = None          # kept for interface compatibility (unused here)
        self.nsample = 0          # number of random samples per image
        self.seed = 0             # RNG seed handed to MPImfp
        self.freq = None          # uint32 (2, pixmax) frequency table
        self.stat = None          # [[total, mean, std] for single, double]
        self.psmax = None         # coarsest pixel size as [value, unit]
        self.image_info = None    # info dicts of the images to measure
    def setup(self, barrier, nsample, pixmax, seed, image_info):
        """Select measurable images, find the coarsest pixel size and
        allocate the frequency table; call before start()."""
        self.image_info = []
        self.psmax = [0.0, 'pixel']
        for info in image_info:
            if info['FileMeasure']:
                self.image_info.append(info)
                ps = FileWidget.PixelSize(info)
                if ps[0] > self.psmax[0]:
                    self.psmax = ps
        self.barrier = barrier
        self.nsample = nsample
        self.freq = np.zeros((2, pixmax), dtype=np.uint32)
        self.seed = seed
    def run(self):
        # Bug fix: floor division -- the Progress signal is declared with an
        # int argument and emitting a Python float there fails under PyQt5.
        inc = 100 // len(self.image_info)
        for info in self.image_info:
            fname = info['FileName']
            ps = FileWidget.PixelSize(info)
            filename = QtCore.QFileInfo(fname).fileName()
            bimg = BinaryImage(info)
            # length step relative to the coarsest image so histogram
            # classes line up across images of different resolution
            dpix = self.psmax[0] / ps[0]
            MPImfp.measure(bimg, self.barrier, self.freq[0], dpix, self.nsample, self.seed, 0)
            self.seed = MPImfp.measure(bimg, self.barrier, self.freq[1], dpix, self.nsample, self.seed, 1)
            self.Progress.emit(inc, filename, ps)
        self.stat = [self.Statistics(self.freq[0], self.psmax),\
                     self.Statistics(self.freq[1], self.psmax)]
        # NOTE(review): QThread also emits finished automatically when run()
        # returns, so listeners fire twice -- kept for compatibility.
        self.finished.emit()
    @staticmethod
    def RelativeFrequency(freq):
        """Return *freq* normalised to unit sum as a float64 array."""
        # Bug fix: np.float was removed in NumPy 1.24; use float64 explicitly.
        return np.array(freq, dtype=np.float64) / np.sum(freq)
    @staticmethod
    def Statistics(freq, ps):
        """Return [total count, mean length, length std] for a histogram
        whose class width is the pixel size ps[0]."""
        tot = np.sum(freq)
        rfreq = np.array(freq, dtype=np.float64) / tot
        length = np.arange(len(freq)) * ps[0]
        ave = np.sum(length * rfreq)
        var = np.sum(np.power(length - ave, 2) * rfreq)
        std = m.sqrt(var)
        return [tot, ave, std]
"""
IMFP Dialog
"""
class IMFPDialog(QtWidgets.QDialog):
    """Dialog driving IMFP measurement over all measurable images and
    plotting, exporting (CSV) or saving (image file) the distributions."""
    def __init__(self, parent):
        QtWidgets.QDialog.__init__(self, parent)
        self.setWindowTitle("IMFP")
        self.parent = parent      # main window; provides image_info
        self.insitu = True        # guards drawGraph while controls are loaded
        self.freq = None          # (2, nclass) single/double frequency table
        self.stat = None          # [total, mean, std] per distribution
        self.psmax = None         # [pixel size, unit] used for the x axis
        self.measure = MeasureIMFPThread()
        self.measure.finished.connect(self.measureFinish)
        self.measure.Progress.connect(self.measureProgress)
        hbox = QtWidgets.QHBoxLayout(self)
        vbox = QtWidgets.QVBoxLayout()
        hbox.addLayout(vbox)
        # plot area: matplotlib canvas hosted in a graphics scene
        self.viewer = QtWidgets.QGraphicsView()
        self.scene = QtWidgets.QGraphicsScene()
        self.viewer.setScene(self.scene)
        self.figure = Figure()
        self.canvas = FigureCanvas(self.figure)
        self.scene.addWidget(self.canvas)
        hbox.addWidget(self.viewer)
        vbox.addWidget(QtWidgets.QLabel('Barrier :'))
        self.combo0 = QtWidgets.QComboBox()
        self.combo0.addItem('White')
        self.combo0.addItem('Black')
        vbox.addWidget(self.combo0)
        vbox.addWidget(QtWidgets.QLabel('NSample (x10000) :'))
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(1)
        self.spin1.setMaximum(1000)
        self.spin1.setValue(100)
        vbox.addWidget(self.spin1)
        vbox.addWidget(QtWidgets.QLabel('NClass :'))
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(100)
        self.spin2.setMaximum(10000)
        self.spin2.setValue(5000)
        vbox.addWidget(self.spin2)
        vbox.addWidget(QtWidgets.QLabel('Seed :'))
        self.line1 = QtWidgets.QLineEdit()
        seed = random.randint(1, 1000000000)
        self.line1.setText(str(seed))
        vbox.addWidget(self.line1)
        self.button1 = QtWidgets.QPushButton('Measure')
        self.button1.clicked[bool].connect(self.measureIMFP)
        vbox.addWidget(self.button1)
        # table listing the measured files with their pixel sizes
        self.treeview = QtWidgets.QTreeView()
        self.treemodel = QtGui.QStandardItemModel()
        self.treemodel.setHorizontalHeaderLabels(['Files', 'PS', 'Unit'])
        self.treeview.setModel(self.treemodel)
        self.treeview.header().setStretchLastSection(False)
        vbox.addWidget(self.treeview)
        vbox.addWidget(QtWidgets.QLabel('Type :'))
        self.combo1 = QtWidgets.QComboBox()
        self.combo1.addItem('Single')
        self.combo1.addItem('Double')
        self.combo1.currentIndexChanged.connect(self.drawGraph)
        vbox.addWidget(self.combo1)
        vbox.addWidget(QtWidgets.QLabel('Plot NClass :'))
        self.spin3 = QtWidgets.QSpinBox()
        self.spin3.setMinimum(10)
        self.spin3.setMaximum(10000)
        self.spin3.setValue(5000)
        self.spin3.valueChanged.connect(self.drawGraph)
        vbox.addWidget(self.spin3)
        self.check1 = QtWidgets.QCheckBox('Relative Frequency')
        self.check1.setChecked(True)
        self.check1.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check1)
        self.check2 = QtWidgets.QCheckBox('Show Statistics')
        self.check2.setChecked(True)
        self.check2.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check2)
        vbox.addWidget(QtWidgets.QLabel('DPI :'))
        self.spin4 = QtWidgets.QSpinBox()
        self.spin4.setMinimum(10)
        self.spin4.setMaximum(3000)
        self.spin4.setValue(100)
        vbox.addWidget(self.spin4)
        vbox.addStretch()
        hbox1 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox1)
        self.button2 = QtWidgets.QPushButton('Save CSV')
        self.button2.clicked[bool].connect(self.saveCSV)
        hbox1.addWidget(self.button2)
        self.button3 = QtWidgets.QPushButton('Save Graph')
        self.button3.clicked[bool].connect(self.saveGraph)
        hbox1.addWidget(self.button3)
        self.button4 = QtWidgets.QPushButton('Close')
        self.button4.clicked[bool].connect(self.close)
        hbox1.addWidget(self.button4)
    def setInfo(self, info):
        """Load dialog state from an info dict and clear the file table."""
        self.combo0.setCurrentIndex(info['Barrier'])
        self.spin1.setValue(info['NSample'])
        self.spin2.setValue(info['NClass'])
        self.line1.setText(info['Seed'])
        self.insitu = False  # avoid redrawing while plot controls are set
        self.combo1.setCurrentIndex(info['Type'])
        self.spin3.setValue(info['PlotClassMax'])
        self.check1.setChecked(info['RelativeFrequency'])
        self.check2.setChecked(info['ShowStatistics'])
        self.insitu = True
        self.spin4.setValue(info['DPI'])
        self.treemodel.removeRows(0, self.treemodel.rowCount())
    def getInfo(self):
        """Return the dialog state as an info dict (inverse of setInfo)."""
        info = {}
        info['Barrier'] = self.combo0.currentIndex()
        info['NSample'] = self.spin1.value()
        info['NClass'] = self.spin2.value()
        info['Seed'] = str(self.line1.text())
        info['Type'] = self.combo1.currentIndex()
        info['PlotClassMax'] = self.spin3.value()
        info['RelativeFrequency'] = self.check1.isChecked()
        info['ShowStatistics'] = self.check2.isChecked()
        info['DPI'] = self.spin4.value()
        return info
    def measureIMFP(self):
        """Start the worker thread with the current settings."""
        if len(self.parent.image_info) > 0:
            # barrier phase: index 1 = Black (0), otherwise White (255)
            if self.combo0.currentIndex() == 1:
                barrier = 0
            else:
                barrier = 255
            nsample = self.spin1.value() * 10000
            pixmax = self.spin2.value()
            seed = int(self.line1.text())
            self.clearFreq()
            self.pdlg = QtWidgets.QProgressDialog(self)
            self.pdlg.setWindowTitle("Measuring IMFP ...")
            self.pdlg.canceled.connect(self.measureCancel)
            self.pdlg.setValue(0)
            self.measure.setup(barrier, nsample, pixmax, seed, self.parent.image_info)
            self.measure.start()
    def measureProgress(self, inc, filename, ps):
        """Advance the progress dialog and append the finished file row."""
        val = self.pdlg.value()
        self.pdlg.setValue(val + inc)
        root = self.treemodel.invisibleRootItem()
        item1 = QtGui.QStandardItem(filename)
        item1.setEditable(False)
        item2 = QtGui.QStandardItem('%.3f' % ps[0])
        item2.setEditable(False)
        item3 = QtGui.QStandardItem(ps[1])
        item3.setEditable(False)
        root.appendRow([item1, item2, item3])
    def measureFinish(self):
        """Collect the worker's results and draw the graph."""
        self.pdlg.close()
        if self.measure.freq is not None and self.measure.stat is not None:
            self.freq = self.measure.freq
            self.stat = self.measure.stat
            self.psmax = self.measure.psmax
            self.drawGraph()
    def measureCancel(self):
        """Abort the worker thread (hard terminate from the progress dialog)."""
        self.measure.terminate()
    def drawIMFP(self, freq, pixmax, dsize, xlabel, ylabel, stat):
        """Plot one distribution; *dsize* is the physical class width and
        *stat* (optional) is [total, mean, std] shown as text."""
        self.figure.clf()
        ax = self.figure.add_subplot(1,1,1)
        length = np.arange(freq.size) * dsize
        ax.plot(length, freq, 'k-', lw=1)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_xlim(0, pixmax * dsize)
        if stat is not None:
            ax.text(0.6, 0.95, 'NSample : ' + str(stat[0]), transform=ax.transAxes)
            ax.text(0.6, 0.9, 'Mean : ' + str(stat[1]), transform=ax.transAxes)
            ax.text(0.6, 0.85, 'STD : ' + str(stat[2]), transform=ax.transAxes)
        self.canvas.draw()
    def drawGraph(self):
        """Redraw the distribution selected by the plot controls."""
        if self.freq is None or not self.insitu:
            return
        if self.combo1.currentIndex() == 0:
            find = 0
            xlabel = 'Single length (%s)' % PlotUnitText(self.psmax[1])
        elif self.combo1.currentIndex() == 1:
            find = 1
            xlabel = 'Double length (%s)' % PlotUnitText(self.psmax[1])
        if self.check1.isChecked():
            freq = MeasureIMFPThread.RelativeFrequency(self.freq[find])
            ylabel = 'Relative Frequency'
        else:
            freq = self.freq[find]
            ylabel = 'Frequency'
        if self.check2.isChecked():
            stat = self.stat[find]
        else:
            stat = None
        pixmax = self.spin3.value()
        self.drawIMFP(freq, pixmax, self.psmax[0], xlabel, ylabel, stat)
    def saveCSV(self):
        """Export the file list, statistics and histograms to a CSV file."""
        if self.freq is None:
            return
        fname = QtWidgets.QFileDialog.getSaveFileName(self, 'Save CSV', filter='CSV Files (*.csv);;All Files (*.*)')[0]
        if fname:
            # Bug fix: the file object was never closed; use a context manager.
            with open(fname, 'w') as fout:
                nimg = self.treemodel.rowCount()
                fout.write('Images,' + str(nimg) + '\n')
                fout.write('ImageID, FileName, PixelSize, Unit\n')
                for i in range(nimg):
                    iname = self.treemodel.item(i,0).text()
                    ps = self.treemodel.item(i,1).text()
                    unit = self.treemodel.item(i,2).text()
                    fout.write('%d, %s, %s, %s\n' % (i, iname, ps, unit))
                fout.write('Statistics, Total, Mean, STD\n')
                tt = ['Single', 'Double']
                for c, st in enumerate(self.stat):
                    fout.write('%s, %d, %f, %f\n' % (tt[c], st[0], st[1], st[2]))
                fout.write('Class, Length, SingleF, SingleRF, DoubleF, DoubleRF\n')
                f0 = self.freq[0]
                f1 = self.freq[1]
                rf0 = MeasureIMFPThread.RelativeFrequency(f0)
                rf1 = MeasureIMFPThread.RelativeFrequency(f1)
                for i in range(len(f0)):
                    length = i * self.psmax[0]
                    fout.write('%d, %f, %d, %f, %d, %f\n' % (i, length, f0[i], rf0[i], f1[i], rf1[i]))
    def saveGraph(self):
        """Save the current figure as an image file at the chosen DPI."""
        if self.freq is None:
            return
        fname = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Graph', filter='Image Files (*.png *.pdf *.svg);;All Files (*.*)')[0]
        if fname:
            self.figure.savefig(str(fname), dpi=self.spin4.value())
    def clearFreq(self):
        """Discard previous results and reset the table and the plot."""
        self.freq = None
        self.stat = None
        self.psmax = None
        self.treemodel.removeRows(0, self.treemodel.rowCount())
        self.figure.clf()
        self.canvas.draw()
"""
Measure LN2D Thread
"""
class MeasureLN2DThread(QtCore.QThread):
    """Worker thread that measures LN2D statistics over the selected images."""
    # (progress increment, file name, pixel size [value, unit])
    Progress = QtCore.pyqtSignal(int, str, list)
    def __init__(self, parent=None):
        super(MeasureLN2DThread, self).__init__(parent)
        self.nsample = 0        # number of random sampling points
        self.freq = None        # 2 x lnmax frequency table (row 0: GC, row 1: random)
        self.image_info = None  # image info dicts selected for measurement
        self.ln2d = None        # MPLn23d measurement object
        self.af = 0             # measured area fraction (0..1)
        self.stat = None        # [[total, mean, variance], ...] per row of freq
    def setup(self, nsample, lnmax, seed, image_info):
        """Prepare the thread; only images flagged 'FileMeasure' are used."""
        self.image_info = []
        for info in image_info:
            if info['FileMeasure']:
                self.image_info.append(info)
        self.nsample = nsample
        self.freq = np.zeros((2, lnmax), dtype=np.uint32)
        self.ln2d = MPLn23d.ln2d_new(len(self.image_info))
        self.ln2d.seed = seed
    def run(self):
        inc = 100 / len(self.image_info)  # progress increment per image
        for info in self.image_info:
            fname = info['FileName']
            ps = FileWidget.PixelSize(info)
            filename = QtCore.QFileInfo(fname).fileName()
            img = BinaryImage(info)
            conts, hier = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            self.AddSec(self.ln2d, img, conts, ps)
            self.Progress.emit(inc, filename, ps)
        self.ln2d.measure_gc(self.freq[0])
        self.ln2d.measure_random(self.freq[1], self.nsample)
        self.af = self.ln2d.area_fraction()
        self.stat = [self.Statistics(self.freq[0]), self.Statistics(self.freq[1])]
        self.finished.emit()
    @staticmethod
    def AddSec(ln2d, img, conts, ps):
        """Register one image section and its grain centers with ln2d; returns section id."""
        sx = img.shape[1] * ps[0]  # physical width
        sy = img.shape[0] * ps[0]  # physical height
        sid = ln2d.add_sec(len(conts), sx, sy)
        for cont in conts:
            mom = cv2.moments(cont)
            if mom['m00'] != 0:  # skip degenerate contours with zero area
                x = mom['m10'] / mom['m00'] * ps[0]
                y = mom['m01'] / mom['m00'] * ps[0]
                area = cv2.contourArea(cont)
                # radius of the equivalent-area circle, in physical units
                r = m.sqrt(area / m.pi) * ps[0]
                ln2d.add_gc(sid, x, y, r)
        return sid
    @staticmethod
    def RelativeFrequency(freq):
        """Normalize a frequency table so it sums to 1."""
        # BUGFIX: np.float was removed in NumPy 1.20; use the builtin float
        return np.array(freq, dtype=float) / np.sum(freq)
    @staticmethod
    def Statistics(freq):
        """Return [total count, mean, variance] of a frequency table."""
        tot = np.sum(freq)
        # BUGFIX: np.float was removed in NumPy 1.20; use the builtin float
        rfreq = np.array(freq, dtype=float) / tot
        length = np.arange(len(freq))
        ave = np.sum(length * rfreq)
        var = np.sum(np.power(length - ave, 2) * rfreq)
        return [tot, ave, var]
"""
LN2D Dialog
"""
class LN2DDialog(QtWidgets.QDialog):
    """Dialog that measures LN2D/LN2DR statistics and plots the histograms.

    LN2D counts local neighbour numbers around gravity centers; LN2DR does
    the same around random points. A shifted-Poisson reference curve can be
    overlaid, parameterized by the measured area fraction.
    """
    def __init__(self, parent):
        QtWidgets.QDialog.__init__(self, parent)
        self.setWindowTitle("LN2D")
        self.parent = parent
        # insitu guards drawGraph() while widgets are set programmatically
        self.insitu = True
        self.freq = None
        self.stat = None
        self.measure = MeasureLN2DThread()
        self.measure.finished.connect(self.measureFinish)
        self.measure.Progress.connect(self.measureProgress)
        hbox = QtWidgets.QHBoxLayout(self)
        vbox = QtWidgets.QVBoxLayout()
        hbox.addLayout(vbox)
        # matplotlib canvas embedded in a graphics scene
        self.viewer = QtWidgets.QGraphicsView()
        self.scene = QtWidgets.QGraphicsScene()
        self.viewer.setScene(self.scene)
        self.figure = Figure()
        self.canvas = FigureCanvas(self.figure)
        self.scene.addWidget(self.canvas)
        hbox.addWidget(self.viewer)
        # measurement parameters
        vbox.addWidget(QtWidgets.QLabel('NSample (x10000) :'))
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(1)
        self.spin1.setMaximum(1000)
        self.spin1.setValue(100)
        vbox.addWidget(self.spin1)
        vbox.addWidget(QtWidgets.QLabel('LN Max :'))
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(10)
        self.spin2.setMaximum(1000)
        self.spin2.setValue(100)
        vbox.addWidget(self.spin2)
        vbox.addWidget(QtWidgets.QLabel('Seed :'))
        self.line1 = QtWidgets.QLineEdit()
        seed = random.randint(1, 1000000000)
        self.line1.setText(str(seed))
        vbox.addWidget(self.line1)
        self.button1 = QtWidgets.QPushButton('Measure')
        self.button1.clicked[bool].connect(self.measureLN2D)
        vbox.addWidget(self.button1)
        # list of measured files
        self.treeview = QtWidgets.QTreeView()
        self.treemodel = QtGui.QStandardItemModel()
        self.treemodel.setHorizontalHeaderLabels(['Files', 'PS', 'Unit'])
        self.treeview.setModel(self.treemodel)
        self.treeview.header().setStretchLastSection(False)
        # self.treeview.header().setResizeMode(0, QtWidgets.QHeaderView.Stretch)
        # self.treeview.header().setResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
        # self.treeview.header().setResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
        vbox.addWidget(self.treeview)
        # plot options (each change triggers drawGraph)
        vbox.addWidget(QtWidgets.QLabel('Type :'))
        self.combo1 = QtWidgets.QComboBox()
        self.combo1.addItem('Gravity center')
        self.combo1.addItem('Random')
        self.combo1.currentIndexChanged.connect(self.drawGraph)
        vbox.addWidget(self.combo1)
        vbox.addWidget(QtWidgets.QLabel('Plot LN Max :'))
        self.spin3 = QtWidgets.QSpinBox()
        self.spin3.setMinimum(1)
        self.spin3.setMaximum(1000)
        self.spin3.setValue(30)
        self.spin3.valueChanged.connect(self.drawGraph)
        vbox.addWidget(self.spin3)
        vbox.addWidget(QtWidgets.QLabel('Area fraction :'))
        self.spin4 = QtWidgets.QSpinBox()
        self.spin4.setMinimum(0)
        self.spin4.setMaximum(100)
        self.spin4.setValue(0)
        self.spin4.valueChanged.connect(self.drawGraph)
        vbox.addWidget(self.spin4)
        self.check1 = QtWidgets.QCheckBox('Relative Frequency')
        self.check1.setChecked(True)
        self.check1.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check1)
        self.check2 = QtWidgets.QCheckBox('Show Statistics')
        self.check2.setChecked(True)
        self.check2.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check2)
        self.check3 = QtWidgets.QCheckBox('Show Reference')
        self.check3.setChecked(True)
        self.check3.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check3)
        vbox.addWidget(QtWidgets.QLabel('DPI :'))
        self.spin5 = QtWidgets.QSpinBox()
        self.spin5.setMinimum(10)
        self.spin5.setMaximum(3000)
        self.spin5.setValue(100)
        vbox.addWidget(self.spin5)
        vbox.addStretch()
        # bottom button row
        hbox1 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox1)
        self.button2 = QtWidgets.QPushButton('Save CSV')
        self.button2.clicked[bool].connect(self.saveCSV)
        hbox1.addWidget(self.button2)
        self.button3 = QtWidgets.QPushButton('Save Graph')
        self.button3.clicked[bool].connect(self.saveGraph)
        hbox1.addWidget(self.button3)
        self.button4 = QtWidgets.QPushButton('Close')
        self.button4.clicked[bool].connect(self.close)
        hbox1.addWidget(self.button4)
    def setInfo(self, info):
        """Restore dialog settings from a saved info dict (inverse of getInfo)."""
        self.spin1.setValue(info['NSample'])
        self.spin2.setValue(info['LNMax'])
        self.line1.setText(info['Seed'])
        # suppress redraws while signal-connected widgets are updated
        self.insitu = False
        self.combo1.setCurrentIndex(info['Type'])
        self.spin3.setValue(info['PlotLNMax'])
        self.spin4.setValue(info['AreaFraction'])
        self.check1.setChecked(info['RelativeFrequency'])
        self.check2.setChecked(info['ShowStatistics'])
        self.check3.setChecked(info['ShowReference'])
        self.insitu = True
        self.spin5.setValue(info['DPI'])
        self.treemodel.removeRows(0, self.treemodel.rowCount())
    def getInfo(self):
        """Collect the dialog's current settings into a dict."""
        info = {}
        info['NSample'] = self.spin1.value()
        info['LNMax'] = self.spin2.value()
        info['Seed'] = str(self.line1.text())
        info['Type'] = self.combo1.currentIndex()
        info['PlotLNMax'] = self.spin3.value()
        info['AreaFraction'] = self.spin4.value()
        info['RelativeFrequency'] = self.check1.isChecked()
        info['ShowStatistics'] = self.check2.isChecked()
        info['ShowReference'] = self.check3.isChecked()
        info['DPI'] = self.spin5.value()
        return info
    def measureLN2D(self):
        """Start the LN2D measurement thread over the parent's image list."""
        if len(self.parent.image_info) > 0:
            nsample = self.spin1.value() * 10000
            lnmax = self.spin2.value()
            seed = int(self.line1.text())
            self.clearFreq()
            # modal progress dialog updated via measureProgress()
            self.pdlg = QtWidgets.QProgressDialog(self)
            self.pdlg.setWindowTitle("Measuring LN2D ...")
            self.pdlg.canceled.connect(self.measureCancel)
            self.pdlg.setValue(0)
            self.measure.setup(nsample, lnmax, seed, self.parent.image_info)
            self.measure.start()
    def measureProgress(self, inc, filename, ps):
        """Advance the progress bar and append the measured image to the list."""
        val = self.pdlg.value()
        self.pdlg.setValue(val + inc)
        root = self.treemodel.invisibleRootItem()
        item1 = QtGui.QStandardItem(filename)
        item1.setEditable(False)
        item2 = QtGui.QStandardItem('%.3f' % ps[0])
        item2.setEditable(False)
        item3 = QtGui.QStandardItem(ps[1])
        item3.setEditable(False)
        root.appendRow([item1, item2, item3])
    def measureFinish(self):
        """Collect results from the worker thread and refresh the plot."""
        self.pdlg.close()
        if self.measure.freq is not None and self.measure.stat is not None:
            self.freq = self.measure.freq
            self.stat = self.measure.stat
            # store the measured area fraction as a percentage
            self.spin4.setValue(int(self.measure.af * 100.0))
            self.drawGraph()
    def measureCancel(self):
        """Abort the worker thread when the progress dialog is cancelled."""
        self.measure.terminate()
    def drawLN2D(self, freq, lnmax, xlabel, ylabel, stat, prob):
        """Draw the LN histogram plus optional statistics text and reference curve."""
        self.figure.clf()
        ax = self.figure.add_subplot(1, 1, 1)
        ax.bar(np.arange(freq.size) - 0.4, freq, color='white', edgecolor='black')
        # BUGFIX: compare with None by identity (PEP 8), not equality
        if prob is not None:
            ax.plot(prob[0], prob[1], 'k-', lw=1)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_xlim(-0.5, lnmax)
        if stat is not None:
            ax.text(0.5, 0.95, 'NSample : ' + str(stat[0]), transform=ax.transAxes)
            ax.text(0.5, 0.9, 'Av. : ' + str(stat[1]), transform=ax.transAxes)
            ax.text(0.5, 0.85, 'Var. : ' + str(stat[2]), transform=ax.transAxes)
            if prob is not None:
                ax.text(0.5, 0.80, 'Ref. Av. : ' + str(prob[2]), transform=ax.transAxes)
                ax.text(0.5, 0.75, 'Ref. Var. : ' + str(prob[3]), transform=ax.transAxes)
        self.canvas.draw()
    def drawGraph(self):
        """Redraw the histogram using the current widget settings."""
        if self.freq is None or not self.insitu:
            return
        if self.combo1.currentIndex() == 0:
            find = 0
            xlabel = 'LN2D'
            prob = self.ln2d_prob(0.01 * self.spin4.value())
        elif self.combo1.currentIndex() == 1:
            find = 1
            xlabel = 'LN2DR'
            prob = self.ln2dr_prob(0.01 * self.spin4.value())
        else:
            # guard against an UnboundLocalError if the combo ever grows
            return
        if self.check1.isChecked():
            freq = self.measure.RelativeFrequency(self.freq[find])
            ylabel = 'Relative Frequency'
        else:
            freq = self.freq[find]
            # scale the reference density to absolute counts
            prob[1] = prob[1] * np.sum(freq)
            ylabel = 'Frequency'
        stat = self.stat[find] if self.check2.isChecked() else None
        if not self.check3.isChecked():
            prob = None
        lnmax = self.spin3.value()
        self.drawLN2D(freq, lnmax, xlabel, ylabel, stat, prob)
    def poisson_prob(self, a, b):
        """Shifted-Poisson reference density; returns [x, y, mean, variance]."""
        x = np.arange(b - 1 + 0.1, 100, 0.1)
        # BUGFIX: np.float was removed in NumPy 1.20; use the builtin float
        y = np.zeros(x.size, dtype=float)
        for i in range(x.size):
            y[i] = m.pow(a, x[i] - b) / m.gamma(x[i] - b + 1) * m.exp(-a)
        return [x, y, a + b, a]
    def ln2d_prob(self, af):
        """Reference LN2D distribution as a function of area fraction."""
        # empirical fit coefficients
        p, q, r, s = 6.1919, 5.8194, 5.1655, 5.7928
        a = p * (m.exp(-q * af) - 1) + 7
        b = r * (1 - m.exp(-s * af)) + 1
        return self.poisson_prob(a, b)
    def ln2dr_prob(self, af):
        """Reference LN2DR distribution as a function of area fraction."""
        # empirical fit coefficients
        p, q = 5.8277, 6.0755
        a = p * (m.exp(-q * af) - 1) + 7
        b = p * (1 - m.exp(-q * af))
        return self.poisson_prob(a, b)
    def saveCSV(self):
        """Export image list, statistics and frequency table to a CSV file."""
        # BUGFIX: guard against saving before any measurement was made
        # (consistent with the other measurement dialogs)
        if self.freq is None:
            return
        fname = QtWidgets.QFileDialog.getSaveFileName(self, 'Save CSV', filter='CSV Files (*.csv);;All Files (*.*)')[0]
        if not fname:
            return
        # BUGFIX: original left the file handle open; use a context manager
        with open(fname, 'w') as fout:
            nimg = self.treemodel.rowCount()
            fout.write('Images,' + str(nimg) + '\n')
            fout.write('ImageID, FileName, PixelSize, Unit\n')
            for i in range(nimg):
                # renamed from `fname` to avoid shadowing the CSV path
                imgname = self.treemodel.item(i, 0).text()
                ps = self.treemodel.item(i, 1).text()
                unit = self.treemodel.item(i, 2).text()
                fout.write('%d, %s, %s, %s\n' % (i, imgname, ps, unit))
            fout.write('Statistics, Total, Average, Variance\n')
            tt = ['GC', 'Random']
            for c, st in enumerate(self.stat):
                fout.write('%s, %d, %f, %f\n' % (tt[c], st[0], st[1], st[2]))
            fout.write('LN, GC, GCRF, Random, RandomRF\n')
            rf0 = self.measure.RelativeFrequency(self.freq[0])
            rf1 = self.measure.RelativeFrequency(self.freq[1])
            for i in range(len(self.freq[0])):
                fout.write('%d, %d, %f, %d, %f\n' % (i, self.freq[0, i], rf0[i], self.freq[1, i], rf1[i]))
    def saveGraph(self):
        """Save the currently displayed graph to an image file."""
        fname = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Graph', filter='Image Files (*.png *.pdf *.svg);;All Files (*.*)')[0]
        if fname:
            self.figure.savefig(str(fname), dpi=self.spin5.value())
    def clearFreq(self):
        """Discard measurement results and reset the file list and the plot."""
        self.freq = None
        self.stat = None
        self.treemodel.removeRows(0, self.treemodel.rowCount())
        self.figure.clf()
        self.canvas.draw()
"""
Measure Size Thread
"""
class MeasureSizeThread(QtCore.QThread):
    """Worker thread measuring particle size/shape descriptors over the selected images."""
    # (progress increment, file name, pixel size [value, unit])
    Progress = QtCore.pyqtSignal(int, str, list)
    def __init__(self, parent=None):
        super(MeasureSizeThread, self).__init__(parent)
        self.image_info = None  # image info dicts selected for measurement
        self.data = None        # rows: [imgID, x, y, dia, long, narrow, aspect, circ, angle]
        self.stat = None        # [mean, std] per measured quantity
        self.psmin = None       # smallest pixel size seen: [value, unit]
        self.afnd = None        # [area fraction (%), number density]
    def setup(self, image_info):
        """Keep only the images flagged 'FileMeasure'."""
        self.image_info = []
        for info in image_info:
            if info['FileMeasure']:
                self.image_info.append(info)
    def run(self):
        inc = 100 / len(self.image_info)  # progress increment per image
        self.data = []
        imgarea = 0.0
        self.psmin = [1.0e12, 'pixel']
        ind = 0
        for info in self.image_info:
            fname = info['FileName']
            filename = QtCore.QFileInfo(fname).fileName()
            ps = FileWidget.PixelSize(info)
            img = BinaryImage(info)
            conts, hier = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            self.data.extend(self.Measure(conts, ps, ind))
            # track the finest pixel size over all images
            if ps[0] < self.psmin[0]:
                self.psmin = ps
            imgarea += img.shape[1] * img.shape[0] * ps[0] * ps[0]
            self.Progress.emit(inc, filename, ps)
            ind += 1
        self.stat = self.Statistics(self.data)
        self.afnd = [self.AreaFraction(self.data, imgarea),
                     self.NumberDensity(self.data, imgarea)]
        self.finished.emit()
    @staticmethod
    def AreaFraction(data, imgarea):
        """Percentage of total image area covered by the equivalent-circle particles."""
        ta = 0.0
        for dt in data:
            r = dt[3] / 2.0  # equivalent-circle radius
            ta += m.pi * r * r
        return 100.0 * ta / imgarea
    @staticmethod
    def NumberDensity(data, imgarea):
        """Number of measured particles per unit area."""
        return len(data) / imgarea
    @staticmethod
    def Measure(conts, ps, index):
        """Measure size/shape descriptors for every contour of one image."""
        data = []
        for cont in conts:
            mom = cv2.moments(cont)
            if mom['m00'] != 0:  # skip degenerate contours with zero area
                x = mom['m10'] / mom['m00'] * ps[0]
                y = mom['m01'] / mom['m00'] * ps[0]
                area = cv2.contourArea(cont)
                dia = 2.0 * m.sqrt(area / m.pi) * ps[0]  # equivalent-circle diameter
                pos, size, ang = cv2.minAreaRect(cont)
                per = cv2.arcLength(cont, True)
                cir = 4.0 * m.pi * area / (per * per)    # circularity (1 = perfect circle)
                # orient so `ls` is the long side; normalize the angle accordingly
                if size[0] >= size[1]:
                    ls = size[0] * ps[0]
                    ns = size[1] * ps[0]
                    asp = size[1] / size[0]
                    ang += 90
                else:
                    ls = size[1] * ps[0]
                    ns = size[0] * ps[0]
                    asp = size[0] / size[1]
                    ang += 180
                data.append([index, x, y, dia, ls, ns, asp, cir, ang])
        return data
    @staticmethod
    def Statistics(data):
        """Return [mean, std] for each measured column (dia, long, narrow, aspect, circ, angle)."""
        dia = np.empty(len(data))
        ls = np.empty(len(data))
        ns = np.empty(len(data))
        asp = np.empty(len(data))
        cir = np.empty(len(data))
        ang = np.empty(len(data))
        for i in range(len(data)):
            dia[i] = data[i][3]
            ls[i] = data[i][4]
            ns[i] = data[i][5]
            asp[i] = data[i][6]
            cir[i] = data[i][7]
            ang[i] = data[i][8]
        stat = []
        stat.append([np.mean(dia), np.std(dia)])
        stat.append([np.mean(ls), np.std(ls)])
        stat.append([np.mean(ns), np.std(ns)])
        stat.append([np.mean(asp), np.std(asp)])
        stat.append([np.mean(cir), np.std(cir)])
        stat.append([np.mean(ang), np.std(ang)])
        return stat
    @staticmethod
    def Frequency(data, col, nclass, dsize):
        """Histogram of column `col` with `nclass` bins of width `dsize`."""
        freq = np.zeros(nclass, dtype=np.uint32)
        for dat in data:
            ind = int(dat[col] / dsize)
            if ind < nclass:  # values beyond the last bin are dropped
                freq[ind] += 1
        return freq
    @staticmethod
    def RelativeFrequency(data, col, nclass, dsize):
        """Histogram of column `col`, normalized to sum to 1."""
        freq = MeasureSizeThread.Frequency(data, col, nclass, dsize)
        # BUGFIX: np.float was removed in NumPy 1.20; use the builtin float
        rfreq = np.array(freq, dtype=float) / np.sum(freq)
        return rfreq
"""
Size Dialog
"""
class SizeDialog(QtWidgets.QDialog):
    def __init__(self, parent):
        """Build the Size dialog: plot canvas on the right, controls on the left."""
        QtWidgets.QDialog.__init__(self, parent)
        self.setWindowTitle("Size")
        self.parent = parent
        # insitu guards drawGraph() while widgets are set programmatically
        self.insitu = True
        self.data = None   # measurement rows from MeasureSizeThread
        self.stat = None   # [mean, std] per measured quantity
        self.psmin = None  # smallest pixel size: [value, unit]
        self.afnd = None   # [area fraction (%), number density]
        # background measurement worker
        self.measure = MeasureSizeThread()
        self.measure.finished.connect(self.measureFinish)
        self.measure.Progress.connect(self.measureProgress)
        hbox = QtWidgets.QHBoxLayout(self)
        vbox = QtWidgets.QVBoxLayout()
        hbox.addLayout(vbox)
        # matplotlib canvas embedded in a graphics scene
        self.viewer = QtWidgets.QGraphicsView()
        self.scene = QtWidgets.QGraphicsScene()
        self.viewer.setScene(self.scene)
        self.figure = Figure()
        self.canvas = FigureCanvas(self.figure)
        self.scene.addWidget(self.canvas)
        hbox.addWidget(self.viewer)
        self.button1 = QtWidgets.QPushButton('Measure')
        self.button1.clicked[bool].connect(self.measureSize)
        vbox.addWidget(self.button1)
        # list of measured files
        self.treeview = QtWidgets.QTreeView()
        self.treemodel = QtGui.QStandardItemModel()
        self.treemodel.setHorizontalHeaderLabels(['Files', 'PS', 'Unit'])
        self.treeview.setModel(self.treemodel)
        self.treeview.header().setStretchLastSection(False)
        # self.treeview.header().setResizeMode(0, QtWidgets.QHeaderView.Stretch)
        # self.treeview.header().setResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
        # self.treeview.header().setResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
        vbox.addWidget(self.treeview)
        # result readouts (area fraction / number density)
        self.labelaf = QtWidgets.QLabel('AF :')
        vbox.addWidget(self.labelaf)
        self.labelnd = QtWidgets.QLabel('ND :')
        vbox.addWidget(self.labelnd)
        # plot options (each change triggers drawGraph)
        vbox.addWidget(QtWidgets.QLabel('Type :'))
        self.combo1 = QtWidgets.QComboBox()
        self.combo1.addItem('Diameter')
        self.combo1.addItem('Long side')
        self.combo1.addItem('Narrow side')
        self.combo1.addItem('Aspect ratio')
        self.combo1.addItem('Circularity')
        self.combo1.addItem('Angle')
        self.combo1.currentIndexChanged.connect(self.drawGraph)
        vbox.addWidget(self.combo1)
        vbox.addWidget(QtWidgets.QLabel('NClass :'))
        self.spin1 = QtWidgets.QSpinBox()
        self.spin1.setMinimum(1)
        self.spin1.setMaximum(500)
        self.spin1.setValue(100)
        self.spin1.valueChanged.connect(self.drawGraph)
        vbox.addWidget(self.spin1)
        vbox.addWidget(QtWidgets.QLabel('DSize :'))
        self.line1 = QtWidgets.QLineEdit()
        self.line1.setValidator(QtGui.QDoubleValidator())
        self.line1.textChanged[str].connect(self.drawGraph)
        self.line1.setText('1.0')
        vbox.addWidget(self.line1)
        self.check1 = QtWidgets.QCheckBox('Relative Frequency')
        self.check1.setChecked(True)
        self.check1.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check1)
        self.check2 = QtWidgets.QCheckBox('Show Statistics')
        self.check2.setChecked(True)
        self.check2.stateChanged[int].connect(self.drawGraph)
        vbox.addWidget(self.check2)
        vbox.addWidget(QtWidgets.QLabel('DPI :'))
        self.spin2 = QtWidgets.QSpinBox()
        self.spin2.setMinimum(10)
        self.spin2.setMaximum(3000)
        self.spin2.setValue(100)
        vbox.addWidget(self.spin2)
        vbox.addStretch()
        # bottom button row
        hbox1 = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox1)
        self.button2 = QtWidgets.QPushButton('Save CSV')
        self.button2.clicked[bool].connect(self.saveCSV)
        hbox1.addWidget(self.button2)
        self.button3 = QtWidgets.QPushButton('Save Graph')
        self.button3.clicked[bool].connect(self.saveGraph)
        hbox1.addWidget(self.button3)
        self.button4 = QtWidgets.QPushButton('Close')
        self.button4.clicked[bool].connect(self.close)
        hbox1.addWidget(self.button4)
    def setInfo(self, info):
        """Restore dialog settings from a saved info dict (inverse of getInfo)."""
        # suppress drawGraph() while signal-connected widgets are updated
        self.insitu = False
        self.combo1.setCurrentIndex(info['Type'])
        self.spin1.setValue(info['NClass'])
        self.line1.setText(info['DSize'])
        self.check1.setChecked(info['RelativeFrequency'])
        self.check2.setChecked(info['ShowStatistics'])
        self.insitu = True
        self.spin2.setValue(info['DPI'])
        self.treemodel.removeRows(0, self.treemodel.rowCount())
def getInfo(self):
info = {}
info['Type'] = self.combo1.currentIndex()
info['NClass'] = self.spin1.value()
info['DSize'] = str(self.line1.text())
info['RelativeFrequency'] = self.check1.isChecked()
info['ShowStatistics'] = self.check2.isChecked()
info['DPI'] = self.spin2.value()
return info
    def measureSize(self):
        """Start the size measurement thread over the parent's image list."""
        if len(self.parent.image_info) > 0:
            self.clearData()
            # modal progress dialog updated via measureProgress()
            self.pdlg = QtWidgets.QProgressDialog(self)
            self.pdlg.setWindowTitle("Measuring Size ...")
            self.pdlg.canceled.connect(self.measureCancel)
            self.pdlg.setValue(0)
            self.measure.setup(self.parent.image_info)
            self.measure.start()
def measureProgress(self, inc, filename, ps):
val = self.pdlg.value()
self.pdlg.setValue(val + inc)
root = self.treemodel.invisibleRootItem()
item1 = QtGui.QStandardItem(filename)
item1.setEditable(False)
item2 = QtGui.QStandardItem('%.3f' % ps[0])
item2.setEditable(False)
item3 = QtGui.QStandardItem(ps[1])
item3.setEditable(False)
root.appendRow([item1, item2, item3])
    def measureFinish(self):
        """Collect results from the worker thread and refresh the plot."""
        self.pdlg.close()
        if self.measure.data is not None and self.measure.stat is not None:
            self.data = self.measure.data
            self.stat = self.measure.stat
            self.psmin = self.measure.psmin  # smallest pixel size over all images
            self.afnd = self.measure.afnd    # [area fraction (%), number density]
            self.labelaf.setText('AF : %f' % self.afnd[0])
            self.labelnd.setText('ND : %f' % self.afnd[1])
            # default class width: the finest pixel size that was measured
            self.line1.setText('%.3f' % self.psmin[0])
            self.drawGraph()
    def measureCancel(self):
        """Abort the worker thread when the progress dialog is cancelled."""
        self.measure.terminate()
def drawSize(self, freq, nclass, dsize, xlabel, ylabel, nsample, stat):
self.figure.clf()
ax = self.figure.add_subplot(1,1,1)
length = | np.arange(freq.size) | numpy.arange |
from sklearn.datasets import load_files
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import bz2;
from tqdm import tqdm;
from math import exp;
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.calibration import CalibratedClassifierCV
class Options:
resample = True
weighting = 'score'
score="f1"
opts = Options()
from abstaining import aCL, NI
def grid_search(P,y, verbose=False):
''' A pretty simple grid search with visualization of the 2D space'''
img = np.zeros([25,25]);
r = np.linspace(0,1,25);
best_indices=None
best_NI = 0
for i1,t1 in enumerate(r):
for i2,t2 in enumerate(r):
c = aCL(P,np.array([t1,t2]))
ni= NI(y,c,2)
this_NI = ni.NI()
img[i1,i2] = this_NI
if this_NI > best_NI:
best_NI = this_NI
best_T = np.array([t1,t2])
if verbose:
print("%f %f --- %f" % (t1,t2,ni.NI()))
print( "Optimization Result (Grid Search):%f %f --- %f" %(best_T[0],best_T[1], best_NI) )
return best_NI, best_T, img
def optimize_kernel(x,args):
''' A kernel to be minimized, args are P and y and verbose '''
c=aCL(args[0], np.array(x))
if (args[2]):
print("params",x);
ni = NI(args[1],c,2) # information with respect to target.
return 1-ni.NI(); # minimizing this maximizes the function
def vote(X_train, y_train, X_test, y_test):
for clf, name in (
(MultinomialNB(alpha=.001),"Multinomial Naive Bayes"),
(MultinomialNB(alpha=.01),"Multinomial Naive Bayes"),
(MultinomialNB(alpha=.1),"Multinomial Naive Bayes"),
(BernoulliNB(alpha=.001), "Bernoulli Bayes"),
(BernoulliNB(alpha=.01), "Bernoulli Bayes"),
(BernoulliNB(alpha=.1), "Bernoulli Bayes"),
#- (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
#- (Perceptron(n_iter=50), "Perceptron"),
#- (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
# (KNeighborsClassifier(n_neighbors=10), "kNN"),
# (RandomForestClassifier(n_estimators=100), "Random forest"),
#- (ExtraTreesClassifier(n_estimators=100), "ExtraTree"),
(SGDClassifier(alpha=.001, max_iter=500,loss="modified_huber",penalty="l2"), "SGD-l2"),
(SGDClassifier(alpha=.001, max_iter=500,loss="modified_huber",penalty="l1"), "SGD-l1"),
(LogisticRegression(penalty="l2",
dual=False,
tol=0.0001,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
random_state=None,
solver="liblinear",
max_iter=100,
multi_class="ovr",
verbose=0,
warm_start=False,
n_jobs=1), "MaxEnt"),
# (SGDClassifier(alpha=.001, n_iter=500,loss="log",penalty="elasticnet"), "SGD-elastic"),
# (CalibratedClassifierCV(SGDClassifier(alpha=.001, n_iter=500,penalty="elasticnet")), "SGD-elastic"),
# (CalibratedClassifierCV(LinearSVC(penalty="l2", dual=False,tol=1e-3)),"L-SVC-l2"), # turns decision_function to predict_proba
):
print(clf)
clf.fit(X_train, y_train)
pred = clf.predict(X_train)
print("Training error (BIAS)")
print(metrics.classification_report(y_train, pred))
pred = clf.predict(X_test)
print("Validation")
print(pred.shape)
print(y_test.shape)
print(metrics.classification_report(y_test, pred))
P = clf.predict_proba(X_test)
direc = np.random.rand(10,2)
res = minimize(optimize_kernel, [0.01,0.01],[P,y_test,False], method='Powell', tol=1e-4, options={'disp':False, 'direc':direc})
pred = aCL(P,res.x)
print("Abstained Validation")
print(metrics.classification_report(y_test, pred))
print("abstained in %d of %d cases (%f)" % (np.sum(pred==2), len(y_test),np.sum(pred==2)/ len(y_test) ))
print(metrics.confusion_matrix(y_test, pred))
if opts.score=="precision":
ps = metrics.precision_score(y_test, pred, average=None)
elif opts.score=="f1":
ps = metrics.f1_score(y_test, pred, average=None)
elif opts.score=='f1squared':
ps = metrics.f1_score(y_test, pred, average=None)
ps = [ x*x for x in ps]
elif opts.score=='f1exp':
ps = metrics.f1_score(y_test, pred, average=None)
ps = [ exp(x) for x in ps]
else:
raise "unknown score "+opts.score
yield ps, pred
print("Load...")
with bz2.BZ2File("la-large-full/single-file.txt.bz2") as f:
lines = f.readlines()
print("Found %d records" % len(lines))
print("Transform to NPY")
lines = [x.decode() for x in tqdm(lines)]
ds = [[l.split(" ")[0], l.split(" ")[1]," ".join(l.split(" ")[2:])] for l in tqdm(lines)]
ds = np.array(ds)
print(ds.shape)
print("Transform to sklearn sets")
class TextDataset:
target=None
data=None
target_names=None
data_train = TextDataset();
data_train.target = (ds[ds[:,0] == 'left',1]=='residential')*1.0
data_train.data = ds[ds[:,0] == 'left',2]
data_train.target_names = ["commercial", "residential"]
data_test = TextDataset();
data_test.target=(ds[ds[:,0] == 'right',1]=='residential')*1.0
data_test.data = ds[ds[:,0] == 'right',2]
data_test.target_names=["commercial", "residential"]
#possibly resample here:
_, counts = np.unique(data_train.target, return_counts=True)
print(counts)
N = np.min(counts)
_, counts = np.unique(data_test.target, return_counts=True)
print(counts)
N = min(N, np.min(counts))
print("Sampling to %d" % (N))
np.random.seed(42);
if opts.resample:
print("resampling")
# selector for N
select = np.hstack([
np.random.choice(np.argwhere(data_train.target==0).squeeze(),N),
np.random.choice(np.argwhere(data_train.target==1).squeeze(),N)
])
data_train.target = data_train.target[select]
data_train.data = data_train.data[select]
select = np.hstack([
np.random.choice(np.argwhere(data_test.target==0).squeeze(),N),
np.random.choice(np.argwhere(data_test.target==1).squeeze(),N)
])
data_test.target = data_test.target[select]
data_test.data = data_test.data[select]
print("finished resampling")
print("Data Setup complete")
print("Vectorize")
vectorizer = TfidfVectorizer(sublinear_tf=True, min_df = 0.001, max_df=0.2,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
y_train = data_train.target
X_test = vectorizer.transform(data_test.data)
y_test = data_test.target
print(X_train.shape)
print(X_test.shape)
#
votes = [x for x in vote(X_train,y_train, X_test,y_test)]
v = [x[1] for x in votes]
w_0 = [x[0][0] for x in votes] # focus weight on residential layer
w_1 = [x[0][1] for x in votes] # focus weight on residential layer
w_0 = w_0 / np.sum(w_0)
w_1 = w_1 / np.sum(w_1)
if opts.weighting is None:
votes_for_0 = np.average((np.array(v) == 0),axis=0)
votes_for_1 = np.average((np.array(v) == 1),axis=0)
elif opts.weighting=='score':
print("Using score" + opts.score)
votes_for_0 = np.average(( | np.array(v) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 12:26:53 2018
@author: <NAME> & <NAME>
"""
from __future__ import print_function
import sklearn.ensemble
from sklearn import metrics
from myo import init, Hub, DeviceListener, StreamEmg
from time import sleep
import numpy as np
import threading
import collections
import math
import _pickle as cPickle
X=[]
ges3 = ["Spread Fingers", "Wave Out", "Wave In", "Fist", "Rest"]
ges = ges3
class Listener(DeviceListener):
def __init__(self, queue_size=1):
self.lock = threading.Lock()
self.emg_data_queue = collections.deque(maxlen=queue_size)
self.ori_data_queue = collections.deque(maxlen=queue_size)
def on_connect(self, myo, timestamp, firmware_version):
myo.set_stream_emg(StreamEmg.enabled)
def on_emg_data(self, myo, timestamp, emg):
if(status):
X.append(np.asarray(emg))
def on_orientation_data(self, myo, timestamp, quat):
# print("Orientation:", quat.x, quat.y, quat.z, quat.w)
with self.lock:
self.ori_data_queue.append(quat)
def get_ori_data(self):
with self.lock:
return list(self.ori_data_queue)
def rms(array):
n = len(array)
sum = 0
for a in array:
sum =+ a*a
return np.sqrt((1/float(n))*sum)
def iav(array):
sum = 0
for a in array:
sum += np.abs(a)
return sum
def ssi(array):
sum = 0
for a in array:
sum += a*a
return sum
def var(array):
n = len(array)
sum = 0
for a in array:
sum += a*a
return ((1/float(n-1))*sum)
def tm3(array):
n = len(array)
print('n : ', n)
sum = 0
for a in array:
sum =+ a*a*a
return np.power((1/float(n))*sum,1/float(3))
def wl(array):
sum = 0
for a in range(0,len(array)-1):
sum =+ array[a+1] - array[a]
return sum
def aac(array):
n = len(array)
sum = 0
for a in range(0,n-1):
sum =+ array[0+1] - array[0]
return sum/float(n)
def toEuler(quat):
quat = quat[0]
# Roll
sin = 2.0 * (quat.w * quat.w + quat.y * quat.z)
cos = +1.0 - 2.0 * (quat.x * quat.x + quat.y * quat.y)
roll = math.atan2(sin, cos)
# Pitch
pitch = math.asin(2 * (quat.w * quat.y - quat.z * quat.x))
# Yaw
sin = 2.0 * (quat.w * quat.z + quat.x * quat.y)
cos = +1.0 - 2.0 * (quat.y * quat.y + quat.z * quat.z)
yaw = math.atan2(sin, cos)
return [pitch, roll, yaw]
init()
hub = Hub()
listener = Listener()
hub.run(1000, listener)
status = 9999
# load it again
with open('META001.pkl', 'rb') as fid:
gnb_loaded1 = cPickle.load(fid)
with open('META002.pkl', 'rb') as fid:
gnb_loaded2 = cPickle.load(fid)
with open('META003.pkl', 'rb') as fid:
gnb_loaded3 = cPickle.load(fid)
X = []
#toEuler(listener.get_ori_data())
while(1):
# myo = feed.get_connected_devices()
if len(X) > 20:
x_f_h = []
X1 = | np.asarray(X) | numpy.asarray |
from openmdao.api import ExplicitComponent
import numpy as np
from scipy.integrate import cumtrapz
from wisdem.commonse import gravity, eps, DirectionVector, NFREQ
from wisdem.commonse.utilities import assembleI, unassembleI
from .map_mooring import NLINES_MAX
class SubstructureGeometry(ExplicitComponent):
"""
OpenMDAO Component class for substructure geometry for floating offshore wind turbines.
"""
def initialize(self):
self.options.declare('nFull')
self.options.declare('nFullTow')
def setup(self):
nFull = self.options['nFull']
nFullTow = self.options['nFullTow']
# Design variables
self.add_input('main_d_full', val=np.zeros((nFull,)), units='m', desc='outer radius at each section node bottom to top (length = nsection + 1)')
self.add_input('offset_d_full', val=np.zeros((nFull,)), units='m', desc='outer radius at each section node bottom to top (length = nsection + 1)')
self.add_input('offset_z_nodes', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')
self.add_input('offset_freeboard', val=0.0, units='m', desc='Length of column above water line')
self.add_input('offset_draft', val=0.0, units='m', desc='Length of column below water line')
self.add_input('main_z_nodes', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')
self.add_input('fairlead_location', val=0.0, desc='Fractional length from column bottom to top for mooring line attachment')
self.add_input('fairlead_offset_from_shell', val=0.0, units='m',desc='fairlead offset from shell')
self.add_input('radius_to_offset_column', val=0.0, units='m',desc='Distance from main column centerpoint to offset column centerpoint')
self.add_input('number_of_offset_columns', val=0, desc='Number of offset columns evenly spaced around main column')
self.add_input('tower_d_full', val=np.zeros((nFullTow,)), units='m', desc='outer radius at each section node bottom to top (length = nsection + 1)')
self.add_input('Rhub', val=0.0, units='m', desc='rotor hub radius')
self.add_input('Hs', val=0.0, units='m', desc='significant wave height')
self.add_input('max_survival_heel', val=0.0, units='deg', desc='max heel angle for turbine survival')
# Output constraints
self.add_output('fairlead', val=0.0, units='m', desc='Depth below water line for mooring line attachment')
self.add_output('fairlead_radius', val=0.0, units='m', desc='Outer spar radius at fairlead depth (point of mooring attachment)')
self.add_output('main_offset_spacing', val=0.0, desc='Radius of main and offset columns relative to spacing')
self.add_output('tower_transition_buffer', val=0.0, units='m', desc='Buffer between substructure main and tower main')
self.add_output('nacelle_transition_buffer', val=0.0, units='m', desc='Buffer between tower top and nacelle main')
self.add_output('offset_freeboard_heel_margin', val=0.0, units='m', desc='Margin so offset column does not submerge during max heel')
self.add_output('offset_draft_heel_margin', val=0.0, units='m', desc='Margin so offset column does not leave water during max heel')
self.add_output('wave_height_fairlead_ratio', val=0.0, desc='Ratio of maximum wave height (avg of top 1%) to fairlead')
# Derivatives
self.declare_partials('*', '*', method='fd', form='central', step=1e-6)
def compute(self, inputs, outputs):
    """Evaluate geometric sizing and stability constraints for the floating substructure.

    Works in a z-coordinate system with z=0 at the waterline; section nodes
    run from the column bottom upwards.

    Parameters
    ----------
    inputs : dict-like of input parameters (declared in ``setup``)
    outputs : dict-like of output constraints (filled in place)
    """
    # Unpack inputs into local names
    n_offset = int(inputs['number_of_offset_columns'])
    radius_main = 0.5 * inputs['main_d_full']
    radius_offset = 0.5 * inputs['offset_d_full']
    radius_semi = inputs['radius_to_offset_column']
    radius_tower = 0.5 * inputs['tower_d_full']
    radius_hub = inputs['Rhub']
    z_offset = inputs['offset_z_nodes']
    z_main = inputs['main_z_nodes']
    frac_location = inputs['fairlead_location']
    fairlead_shell_offset = inputs['fairlead_offset_from_shell']
    freeboard_offset = inputs['offset_freeboard']
    draft_offset = inputs['offset_draft']
    heel_max = inputs['max_survival_heel']

    # Columns must not overlap: spacing minus the two largest column radii
    outputs['main_offset_spacing'] = radius_semi - radius_main.max() - radius_offset.max()

    # Mooring attachment (fairlead): on the offset columns when present,
    # otherwise on the main column.  Interpolate the shell radius at that depth.
    if n_offset > 0:
        z_fairlead = frac_location * (z_offset[-1] - z_offset[0]) + z_offset[0]
        outputs['fairlead_radius'] = radius_semi + fairlead_shell_offset + np.interp(z_fairlead, z_offset, radius_offset)
    else:
        z_fairlead = frac_location * (z_main[-1] - z_main[0]) + z_main[0]
        outputs['fairlead_radius'] = fairlead_shell_offset + np.interp(z_fairlead, z_main, radius_main)

    # Fairlead is reported as a positive depth below the waterline
    outputs['fairlead'] = -z_fairlead
    outputs['wave_height_fairlead_ratio'] = inputs['Hs'] / np.abs(z_fairlead)

    # Spar top must be at least as wide as the tower base; tower top must fit the nacelle
    outputs['tower_transition_buffer'] = radius_main[-1] - radius_tower[0]
    outputs['nacelle_transition_buffer'] = (radius_hub + 1.0) - radius_tower[-1]  # rough nacelle-size guess

    # Offset columns must neither submerge nor leave the water at max heel
    heel_deflect = radius_semi * np.sin(np.deg2rad(heel_max))
    outputs['offset_freeboard_heel_margin'] = freeboard_offset - heel_deflect
    outputs['offset_draft_heel_margin'] = draft_offset - heel_deflect
class Substructure(ExplicitComponent):
def initialize(self):
    """Declare the discretization-size options consumed by ``setup``."""
    for option_name in ('nFull', 'nFullTow'):
        self.options.declare(option_name)
def setup(self):
nFull = self.options['nFull']
nFullTow = self.options['nFullTow']
# Environment
self.add_input('water_density', val=0.0, units='kg/m**3', desc='density of water')
self.add_input('wave_period_range_low', val=2.0, units='s', desc='Lower bound of typical ocean wavve period')
self.add_input('wave_period_range_high', val=20.0, units='s', desc='Upper bound of typical ocean wavve period')
# From other components
self.add_input('operational_heel', val=0.0, units='deg',desc='Maximum angle of heel allowable')
self.add_input('mooring_mass', val=0.0, units='kg', desc='Mass of mooring lines')
self.add_input('mooring_moments_of_inertia', val=np.zeros(6), units='kg*m**2', desc='mass moment of inertia of mooring system about fairlead-centerline point [xx yy zz xy xz yz]')
self.add_input('mooring_neutral_load', val=np.zeros((NLINES_MAX,3)), units='N', desc='z-force of mooring lines on structure')
self.add_input('mooring_surge_restoring_force', val=0.0, units='N', desc='Restoring force from mooring system after surge motion')
self.add_input('mooring_pitch_restoring_force', val=np.zeros((NLINES_MAX,3)), units='N', desc='Restoring force from mooring system after pitch motion')
self.add_input('mooring_cost', val=0.0, units='USD', desc='Cost of mooring system')
self.add_input('mooring_stiffness', val=np.zeros((6,6)), units='N/m', desc='Linearized stiffness matrix of mooring system at neutral (no offset) conditions.')
self.add_input('fairlead', val=1.0, units='m', desc='Depth below water for mooring line attachment')
self.add_input('fairlead_radius', val=0.0, units='m', desc='Outer spar radius at fairlead depth (point of mooring attachment)')
self.add_input('number_of_offset_columns', val=0, desc='Number of offset columns evenly spaced around main column')
self.add_input('radius_to_offset_column', val=0.0, units='m',desc='Distance from main column centerpoint to offset column centerpoint')
self.add_input('main_Iwaterplane', val=0.0, units='m**4', desc='Second moment of area of waterplane cross-section')
self.add_input('main_Awaterplane', val=0.0, units='m**2', desc='Area of waterplane cross-section')
self.add_input('main_cost', val=0.0, units='USD', desc='Cost of spar structure')
self.add_input('main_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of main column by section')
self.add_input('main_freeboard', val=0.0, units='m', desc='Length of spar above water line')
self.add_input('main_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of column buoyancy force')
self.add_input('main_center_of_mass', val=0.0, units='m', desc='z-position of center of column mass')
self.add_input('main_moments_of_inertia', val=np.zeros(6), units='kg*m**2', desc='mass moment of inertia of column about main [xx yy zz xy xz yz]')
self.add_input('main_added_mass', val=np.zeros(6), units='kg', desc='Diagonal of added mass matrix- masses are first 3 entries, moments are last 3')
self.add_input('offset_Iwaterplane', val=0.0, units='m**4', desc='Second moment of area of waterplane cross-section')
self.add_input('offset_Awaterplane', val=0.0, units='m**2', desc='Area of waterplane cross-section')
self.add_input('offset_cost', val=0.0, units='USD', desc='Cost of spar structure')
self.add_input('offset_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of offset column by section')
self.add_input('offset_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of column buoyancy force')
self.add_input('offset_center_of_mass', val=0.0, units='m', desc='z-position of center of column mass')
self.add_input('offset_moments_of_inertia', val=np.zeros(6), units='kg*m**2', desc='mass moment of inertia of column about main [xx yy zz xy xz yz]')
self.add_input('offset_added_mass', val=np.zeros(6), units='kg', desc='Diagonal of added mass matrix- masses are first 3 entries, moments are last 3')
self.add_input('tower_mass', val=0.0, units='kg', desc='Mass of tower')
self.add_input('tower_shell_cost', val=0.0, units='USD', desc='Cost of tower')
self.add_input('tower_I_base', val=np.zeros(6), units='kg*m**2', desc='Moments about tower main')
self.add_input('tower_z_full', val=np.zeros((nFullTow,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')
self.add_input('rna_mass', val=0.0, units='kg', desc='Mass of RNA')
self.add_input('rna_cg', val=np.zeros(3), units='m', desc='Location of RNA center of mass relative to tower top')
self.add_input('rna_I', val=np.zeros(6), units='kg*m**2', desc='Moments about turbine main')
self.add_input('water_ballast_zpts_vector', val=np.zeros((nFull,)), units='m', desc='z-points of potential ballast mass')
self.add_input('water_ballast_radius_vector', val=np.zeros((nFull,)), units='m', desc='Inner radius of potential ballast mass')
self.add_input('structural_mass', val=0.0, units='kg', desc='Mass of whole turbine except for mooring lines')
self.add_input('structure_center_of_mass', val=np.zeros(3), units='m', desc='xyz-position of center of gravity of whole turbine')
self.add_input('structural_frequencies', val=np.zeros(NFREQ), units='Hz', desc='')
self.add_input('z_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of gravity (x,y = 0,0)')
self.add_input('total_displacement', val=0.0, units='m**3', desc='Total volume of water displaced by floating turbine (except for mooring lines)')
self.add_input('total_force', val=np.zeros(3), units='N', desc='Net forces on turbine')
self.add_input('total_moment', val=np.zeros(3), units='N*m', desc='Moments on whole turbine')
self.add_input('pontoon_cost', val=0.0, units='USD', desc='Cost of pontoon elements and connecting truss')
# Outputs
self.add_output('substructure_moments_of_inertia', val=np.zeros(6), units='kg*m**2', desc='mass moment of inertia of substructure (no tower or rna or mooring) [xx yy zz xy xz yz]')
self.add_output('total_mass', val=0.0, units='kg', desc='total mass of spar and moorings')
self.add_output('total_cost', val=0.0, units='USD', desc='total cost of spar and moorings')
self.add_output('metacentric_height', val=0.0, units='m', desc='measure of static overturning stability')
self.add_output('buoyancy_to_gravity', val=0.0, desc='static stability margin based on position of centers of gravity and buoyancy')
self.add_output('offset_force_ratio', val=0.0, desc='total surge force divided by restoring force')
self.add_output('heel_moment_ratio', val=0.0, desc='total pitch moment divided by restoring moment')
self.add_output('Iwaterplane_system', val=0.0, units='m**4', desc='Second moment of area of waterplane cross-section for whole structure')
self.add_output('center_of_mass', val=np.zeros(3), units='m', desc='xyz-position of center of gravity (x,y = 0,0)')
self.add_output('variable_ballast_mass', val=0.0, units='kg', desc='Amount of variable water ballast')
self.add_output('variable_ballast_center_of_mass', val=0.0, units='m', desc='Center of mass for variable ballast')
self.add_output('variable_ballast_moments_of_inertia', val= | np.zeros(6) | numpy.zeros |
import numpy as np
import matplotlib.pyplot as plt
import time
def pixel_rgb_distance_map(img_arr, kernel, points=None):
sub_pooled_scores_list = []
pooled_scores_list = []
for i in range(kernel.shape[0]):
for j in range(kernel.shape[1]):
if i != j:
continue
kpix = kernel[i, j, :]
norm_matrix = np.expand_dims( | np.linalg.norm(img_arr, axis=2) | numpy.linalg.norm |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.sparse import csr_matrix, identity, kron
from scipy.sparse.linalg import eigs, eigsh
import itertools
from scipy.linalg import block_diag, eig, expm, eigh
from scipy.sparse import save_npz, load_npz, csr_matrix, csc_matrix
import scipy.sparse as sp
from scipy.special import binom
import yaml
import copy
import warnings
import os
import time
from .Hamiltonians import DisplacedAnharmonicOscillator, PolymerVibrations, Polymer, DiagonalizeHamiltonian, LadderOperators
from .general_Liouvillian_classes import LiouvillianConstructor
class OpenPolymer(Polymer,LiouvillianConstructor):
def __init__(self, site_energies, site_couplings, dipoles):
    """Open-systems extension of the Polymer object.

    Bath coupling is described in the Lindblad formalism; every rate below
    defaults to zero (closed system) and must be set by the user before the
    corresponding dissipator is built.
    """
    super().__init__(site_energies, site_couplings, dipoles)
    # Dissipation rates and temperature all start at zero until configured.
    for rate_attr in ('optical_dephasing_gamma',
                      'optical_relaxation_gamma',
                      'site_to_site_dephasing_gamma',
                      'site_to_site_relaxation_gamma',
                      'exciton_relaxation_gamma',
                      'exciton_exciton_dephasing_gamma',
                      'kT'):
        setattr(self, rate_attr, 0)
def optical_dephasing_operator(self):
    """Return the sum of all site-occupation operators (total excitation number)."""
    summed = self.occupied_list[0].copy()
    for occ_op in self.occupied_list[1:]:
        summed += occ_op
    return summed
def optical_dephasing_instructions(self):
    """Lindblad instructions for optical dephasing of all sites."""
    return self.make_Lindblad_instructions(self.optical_dephasing_gamma,
                                           self.optical_dephasing_operator())
def optical_dephasing_Liouvillian(self):
    """Liouvillian built from the optical-dephasing Lindblad instructions."""
    return self.make_Liouvillian(self.optical_dephasing_instructions())
def boltzmann_factors(self, E1, E2):
    """Return the (E2 -> E1, E1 -> E2) thermal transfer weights for any ordering.

    Delegates to ``boltzmann_factors_ordered_inputs`` after sorting the pair;
    equal energies split the weight evenly.
    """
    if E1 == E2:
        return 0.5, 0.5
    if E1 < E2:
        return self.boltzmann_factors_ordered_inputs(E1, E2)
    # E1 > E2: compute for the ordered pair, then swap the roles back.
    down, up = self.boltzmann_factors_ordered_inputs(E2, E1)
    return up, down
def boltzmann_factors_ordered_inputs(self, E1, E2):
    """Thermal transfer weights for an ordered pair of energies (E1 must be less than E2).

    Returns ``(E2_to_E1, E1_to_E2)``, the normalized Boltzmann weights at
    temperature ``self.kT``.
    """
    if self.kT == 0:
        # Zero temperature: everything relaxes downward.
        return 1, 0
    Z = np.exp(-E1/self.kT) + np.exp(-E2/self.kT)
    if np.isclose(Z, 0):
        # Partition function underflowed numerically; use the T -> 0 limit.
        return 1, 0
    return np.exp(-E1/self.kT)/Z, np.exp(-E2/self.kT)/Z
def optical_relaxation_instructions(self):
    """Lindblad instructions for optical relaxation between the ground state
    (energy 0) and each excited state, thermally weighted."""
    ground_energy = 0
    gamma = self.optical_relaxation_gamma
    ins_list = []
    for excited_energy, up_op in zip(self.energies, self.up_list):
        b_ground, b_excited = self.boltzmann_factors(ground_energy, excited_energy)
        # Channel weighted by the ground-state factor acts through the transposed operator.
        ins_list += self.make_Lindblad_instructions(gamma * b_ground, up_op.T)
        # The reverse channel is skipped entirely when its thermal weight vanishes.
        if not np.isclose(b_excited, 0):
            ins_list += self.make_Lindblad_instructions(gamma * b_excited, up_op)
    return ins_list
def optical_relaxation_Liouvillian(self):
    """Liouvillian built from the optical-relaxation Lindblad instructions."""
    return self.make_Liouvillian(self.optical_relaxation_instructions())
def site_to_site_relaxation_instructions(self):
    """Lindblad instructions for thermally-weighted population exchange
    between every pair of sites."""
    gamma = self.site_to_site_relaxation_gamma
    ins_list = []
    pair_iter = itertools.combinations(range(len(self.energies)), 2)
    for idx, (n, m) in enumerate(pair_iter):
        bn, bm = self.boltzmann_factors(self.energies[n], self.energies[m])
        exchange_op = self.exchange_list[idx]
        # Forward transfer weighted by bn, reverse (transposed) weighted by bm.
        ins_list += self.make_Lindblad_instructions(gamma * bn, exchange_op)
        ins_list += self.make_Lindblad_instructions(gamma * bm, exchange_op.T)
    return ins_list
def site_to_site_relaxation_Liouvillian(self):
    """Liouvillian built from the site-to-site relaxation instructions."""
    return self.make_Liouvillian(self.site_to_site_relaxation_instructions())
def site_to_site_dephasing_operator_list(self):
    """Occupation-difference operators for every unordered pair of sites."""
    return [self.occupied_list[i] - self.occupied_list[j]
            for i, j in itertools.combinations(range(self.num_sites), 2)]
def all_site_dephasing_instructions(self):
    """Lindblad instructions for dephasing between every pair of sites."""
    gamma = self.site_to_site_dephasing_gamma
    instructions = []
    for deph_op in self.site_to_site_dephasing_operator_list():
        instructions.extend(self.make_Lindblad_instructions(gamma, deph_op))
    return instructions
def all_site_dephasing_Liouvillian(self):
    """Site-dephasing Liouvillian, normalized by twice the number of sites."""
    full_L = self.make_Liouvillian(self.all_site_dephasing_instructions())
    return full_L / (2 * self.num_sites)
def set_electronic_dissipation_instructions(self):
    """Collect all active electronic dissipation channels into
    ``self.electronic_dissipation_instructions``.

    A channel contributes only when its rate is nonzero; the channel order
    (optical dephasing, site dephasing, site relaxation, optical relaxation)
    is preserved.
    """
    channels = ((self.optical_dephasing_gamma, self.optical_dephasing_instructions),
                (self.site_to_site_dephasing_gamma, self.all_site_dephasing_instructions),
                (self.site_to_site_relaxation_gamma, self.site_to_site_relaxation_instructions),
                (self.optical_relaxation_gamma, self.optical_relaxation_instructions))
    inst_list = []
    for rate, builder in channels:
        if rate != 0:
            inst_list += builder()
    self.electronic_dissipation_instructions = inst_list
def make_manifold_hamiltonian_instructions(self, ket_manifold, bra_manifold):
    """Commutator instructions for -i*H acting on the chosen ket/bra manifolds."""
    H_ket = self.get_electronic_hamiltonian(manifold_num=ket_manifold)
    H_bra = self.get_electronic_hamiltonian(manifold_num=bra_manifold)
    return self.make_commutator_instructions2(-1j * H_ket, -1j * H_bra)
def make_total_Liouvillian(self):
    """Assemble the full Liouvillian (coherent + dissipative) and store it as ``self.L``."""
    liouvillian = self.make_Liouvillian(
        self.make_manifold_hamiltonian_instructions('all', 'all'))
    if self.num_sites > 1:
        # Multi-site systems additionally pick up exciton dephasing/relaxation.
        liouvillian = liouvillian + self.all_exciton_dephasing_Liouvillian()
        liouvillian = liouvillian + self.exciton_relaxation_Liouvillian()
    liouvillian = liouvillian + self.optical_dephasing_Liouvillian()
    self.L = liouvillian
def eigfun(self,L,*,check_eigenvectors = True,invert = True,populations_only = False):
    """Diagonalize the Liouvillian L and store/return its eigensystem.

    Eigenvalues are rounded to 12 decimals and sorted ascending.  Each right
    eigenvector has its largest-magnitude component made positive-real (sign
    convention), and zero-eigenvalue vectors (steady states) are normalized to
    unit trace.  Left eigenvectors come either from a pseudo-inverse of the
    right ones (invert=True) or from diagonalizing L.T and rescaling so that
    left[i] . right[i] == 1.

    Parameters
    ----------
    L : ndarray
        Liouvillian matrix to diagonalize.
    check_eigenvectors : bool
        If True, verify that left.L.right is diagonal to tolerance and warn otherwise.
    invert : bool
        If True, obtain left eigenvectors via np.linalg.pinv of the right ones.
    populations_only : bool
        If True, normalize zero-eigenvalue vectors by their element sum rather
        than by the trace of their matrix reshape.

    Returns
    -------
    (eigvals, eigvecs, eigvecs_left)
        Eigenvalues, right eigenvectors (columns) and left eigenvectors (rows).
        Also stored on self.eigenvalues / self.eigenvectors.
    """
    eigvals, eigvecs = np.linalg.eig(L)
    # Round so that near-degenerate/steady-state eigenvalues compare equal to 0
    eigvals = np.round(eigvals,12)
    sort_indices = eigvals.argsort()
    eigvals.sort()
    eigvecs = eigvecs[:,sort_indices]
    for i in range(eigvals.size):
        # Fix overall sign: largest-magnitude component is made positive-real
        max_index = np.argmax(np.abs(eigvecs[:,i]))
        if np.real(eigvecs[max_index,i]) < 0:
            eigvecs[:,i] *= -1
        if eigvals[i] == 0:
            # eigenvalues of 0 correspond to thermal distributions,
            # which should have unit trace in the Hamiltonian space
            if populations_only:
                trace_norm = eigvecs[:,i].sum()
                eigvecs[:,i] = eigvecs[:,i] / trace_norm
            else:
                # Interpret the vector as a flattened square density matrix
                shape = int(np.sqrt(eigvals.size))
                trace_norm = eigvecs[:,i].reshape(shape,shape).trace()
                if np.isclose(trace_norm,0):
                    # Traceless zero-mode (coherence-like); leave unnormalized
                    pass
                else:
                    eigvecs[:,i] = eigvecs[:,i] / trace_norm
    if invert:
        eigvecs_left = np.linalg.pinv(eigvecs)
    else:
        # Diagonalize the transpose to get left eigenvectors directly
        eigvals_left, eigvecs_left = np.linalg.eig(L.T)
        eigvals_left = np.round(eigvals_left,12)
        sort_indices_left = eigvals_left.argsort()
        eigvals_left.sort()
        eigvecs_left = eigvecs_left[:,sort_indices_left]
        eigvecs_left = eigvecs_left.T
        for i in range(eigvals_left.size):
            # Rescale each left vector so that left[i] . right[i] == 1
            norm = np.dot(eigvecs_left[i,:],eigvecs[:,i])
            eigvecs_left[i,:] *= 1/norm
    if check_eigenvectors:
        LV = L.dot(eigvecs)
        D = eigvecs_left.dot(LV)
        if np.allclose(D,np.diag(eigvals),rtol=1E-10,atol=1E-10):
            pass
        else:
            warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))
    self.eigenvalues = eigvals
    self.eigenvectors = {'left':eigvecs_left,'right':eigvecs}
    return eigvals, eigvecs, eigvecs_left
def save_L(self, dirname):
    """Write the Liouvillian to ``dirname/L.npz`` in sparse CSR format."""
    target = os.path.join(dirname, 'L.npz')
    save_npz(target, csr_matrix(self.L))
def save_L_by_manifold(self):
    """Save the per-manifold Liouvillians to ``self.base_path/L.npz``."""
    target = os.path.join(self.base_path, 'L.npz')
    np.savez(target, **self.L_by_manifold)
def save_eigsystem(self, dirname):
    """Save eigenvalues and left/right eigenvectors of the Liouvillian to dirname."""
    payloads = (('right_eigenvectors.npz', self.eigenvectors['right']),
                ('left_eigenvectors.npz', self.eigenvectors['left']),
                ('eigenvalues.npz', self.eigenvalues))
    for fname, data in payloads:
        np.savez(os.path.join(dirname, fname), all_manifolds=data)
def save_mu(self,dirname,*,mask=True):
    """Transform the dipole operator into the Liouvillian eigenbasis and save it.

    Builds the ket- and bra-side superoperator forms of self.mu, rotates them
    with the left/right eigenvectors set by eigfun, and writes them (plus the
    eigensystem) to .npz files in dirname.  With mask=True, additionally saves
    boolean masks and pruned copies in which elements that round to zero at
    mu_mask_tol decimals are removed.
    """
    # Left/right eigenvectors of the Liouvillian (must be set by eigfun first)
    evl = self.eigenvectors['left']
    ev = self.eigenvectors['right']
    II = np.eye(self.mu.shape[0])
    # Superoperator forms: mu acting on the ket side and on the bra side of rho
    mu_ket = np.kron(self.mu,II.T)
    mu_bra = np.kron(II,self.mu.T)
    # Decimals kept when deciding which matrix elements count as nonzero
    mu_mask_tol = 10
    # Rotate into the eigenbasis: evl . mu . ev
    mu_ket_t = np.dot(np.dot(evl,mu_ket),ev)
    # Stored as (N, N, 3); only the first cartesian component is populated here
    mu_ket_3d = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
    mu_ket_3d[:,:,0] = mu_ket_t
    mu_bra_t = np.dot(np.dot(evl,mu_bra),ev)
    mu_bra_3d = np.zeros((mu_bra_t.shape[0],mu_bra_t.shape[0],3),dtype='complex')
    mu_bra_3d[:,:,0] = mu_bra_t
    if mask:
        # The assignment casts the rounded complex values into the bool array:
        # nonzero -> True.  NOTE(review): this may emit a ComplexWarning about
        # discarding the imaginary part — confirm this is intended.
        ket_mask = np.zeros(mu_ket_t.shape,dtype='bool')
        ket_mask[:,:] = np.round(mu_ket_t,mu_mask_tol)[:,:]
        mu_ket_t_masked = mu_ket_t * ket_mask
        mu_ket_3d_masked = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
        mu_ket_3d_masked[:,:,0] = mu_ket_t_masked
        bra_mask = np.zeros(mu_bra_t.shape,dtype='bool')
        bra_mask[:,:] = np.round(mu_bra_t,mu_mask_tol)[:,:]
        mu_bra_t_masked = mu_bra_t * bra_mask
        mu_bra_3d_masked = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
        mu_bra_3d_masked[:,:,0] = mu_bra_t_masked
        np.savez(os.path.join(dirname,'mu.npz'),ket=mu_ket_3d,bra=mu_bra_3d)
        np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
        np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
        np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
        np.savez(os.path.join(dirname,'mu_boolean.npz'),ket=ket_mask,bra=bra_mask)
        np.savez(os.path.join(dirname,'mu_pruned.npz'),ket=mu_ket_3d_masked,bra=mu_bra_3d_masked)
    else:
        np.savez(os.path.join(dirname,'mu.npz'),ket=mu_ket_3d,bra=mu_bra_3d)
        np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
        np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
        np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
def save_RWA_mu(self,dirname,*,mask=True):
    """Save the rotating-wave-approximation dipole superoperators in the eigenbasis.

    Separates the dipole operator into up (excitation) and down (de-excitation)
    components acting on the ket and bra sides of rho, rotates each with the
    left/right Liouvillian eigenvectors, and writes them plus the eigensystem
    to .npz files in dirname.  With mask=True, also saves boolean masks and
    pruned copies in which elements that round to zero at mu_mask_tol decimals
    are removed.
    """
    evl = self.eigenvectors['left']
    ev = self.eigenvectors['right']
    II = np.eye(self.mu_ket_up.shape[0])
    # Four superoperator channels: (ket/bra) x (up = mu, down = mu.T)
    mu_ket_up = np.kron(self.mu_ket_up,II.T)
    mu_ket_down = np.kron(self.mu_ket_up.T,II.T)
    mu_bra_up = np.kron(II,self.mu_ket_up)
    mu_bra_down = np.kron(II,self.mu_ket_up.T)
    # Decimals kept when deciding which matrix elements count as nonzero
    mu_mask_tol = 10
    # Rotate each channel into the eigenbasis (evl . mu . ev) and store as
    # (N, N, 3) arrays with only the first cartesian component populated.
    mu_ket_up_t = np.dot(np.dot(evl,mu_ket_up),ev)
    mu_ket_up_3d = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
    mu_ket_up_3d[:,:,0] = mu_ket_up_t
    mu_bra_up_t = np.dot(np.dot(evl,mu_bra_up),ev)
    mu_bra_up_3d = np.zeros((mu_bra_up_t.shape[0],mu_bra_up_t.shape[0],3),dtype='complex')
    mu_bra_up_3d[:,:,0] = mu_bra_up_t
    mu_ket_down_t = np.dot(np.dot(evl,mu_ket_down),ev)
    mu_ket_down_3d = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
    mu_ket_down_3d[:,:,0] = mu_ket_down_t
    mu_bra_down_t = np.dot(np.dot(evl,mu_bra_down),ev)
    mu_bra_down_3d = np.zeros((mu_bra_down_t.shape[0],mu_bra_down_t.shape[0],3),dtype='complex')
    mu_bra_down_3d[:,:,0] = mu_bra_down_t
    if mask:
        # Assignments cast the rounded complex values into bool arrays
        # (nonzero -> True); may emit a ComplexWarning — see save_mu.
        ket_up_mask = np.zeros(mu_ket_up_t.shape,dtype='bool')
        ket_up_mask[:,:] = np.round(mu_ket_up_t,mu_mask_tol)[:,:]
        mu_ket_up_t_masked = mu_ket_up_t * ket_up_mask
        mu_ket_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
        mu_ket_up_3d_masked[:,:,0] = mu_ket_up_t_masked
        bra_up_mask = np.zeros(mu_bra_up_t.shape,dtype='bool')
        bra_up_mask[:,:] = np.round(mu_bra_up_t,mu_mask_tol)[:,:]
        mu_bra_up_t_masked = mu_bra_up_t * bra_up_mask
        mu_bra_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
        mu_bra_up_3d_masked[:,:,0] = mu_bra_up_t_masked
        ket_down_mask = np.zeros(mu_ket_down_t.shape,dtype='bool')
        ket_down_mask[:,:] = np.round(mu_ket_down_t,mu_mask_tol)[:,:]
        mu_ket_down_t_masked = mu_ket_down_t * ket_down_mask
        mu_ket_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
        mu_ket_down_3d_masked[:,:,0] = mu_ket_down_t_masked
        bra_down_mask = np.zeros(mu_bra_down_t.shape,dtype='bool')
        bra_down_mask[:,:] = np.round(mu_bra_down_t,mu_mask_tol)[:,:]
        mu_bra_down_t_masked = mu_bra_down_t * bra_down_mask
        mu_bra_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
        mu_bra_down_3d_masked[:,:,0] = mu_bra_down_t_masked
        np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
                 ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
        np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
        np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
        np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
        np.savez(os.path.join(dirname,'mu_boolean.npz'),ket_up=ket_up_mask,bra_up=bra_up_mask,
                 ket_down=ket_down_mask,bra_down=bra_down_mask)
        np.savez(os.path.join(dirname,'mu_pruned.npz'),ket_up=mu_ket_up_3d_masked,
                 bra_up=mu_bra_up_3d_masked,ket_down=mu_ket_down_3d_masked,
                 bra_down=mu_bra_down_3d_masked)
    else:
        np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
                 ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
        np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
        np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
        np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
def save_RWA_mu_site_basis(self,dirname):
II = np.eye(self.mu_ket_up.shape[0])
mu_ket_up = | np.kron(self.mu_ket_up,II.T) | numpy.kron |
import os
from rpgpy import spectra2moments
from rpgpy import spcutil
from rpgpy import read_rpg
import numpy as np
from time import time
from numpy.testing import assert_array_almost_equal
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
class TestFindPeaks:
def test_main_peak_1(self):
    """Edges of the dominant peak when a smaller isolated bin precedes it."""
    signal = np.array([0, 0, 0, 0.3, 0, 0, 0.2, 0.3, 0.5, 0.2, 0, 0, 0, 0.2])
    left_edge, right_edge = spcutil.find_peak_edges(signal)
    assert (left_edge, right_edge) == (6, 10)
def test_find_single_value(self):
    """A single nonzero bin yields a one-bin-wide peak."""
    signal = np.array([0, 0, 0, 0.3, 0, 0])
    left_edge, right_edge = spcutil.find_peak_edges(signal)
    assert (left_edge, right_edge) == (3, 4)
def test_find_left_edge(self):
    """Peak running into the left boundary of the spectrum."""
    signal = np.array([0.1, 0.2, 0.3, 0.5, 0, 0])
    left_edge, right_edge = spcutil.find_peak_edges(signal)
    assert (left_edge, right_edge) == (0, 4)
def test_find_right_edge(self):
    """Peak running into the right boundary of the spectrum."""
    signal = np.array([0, 0.2, 0.3, 0.5, 0.4, 0.3])
    left_edge, right_edge = spcutil.find_peak_edges(signal)
    assert left_edge == 1
    # NOTE(review): right edge lands one past the last bin — should this be 5?
    assert right_edge == 6
def test_find_peak_with_secondary_peak(self):
data = | np.array([0, 0.1, 0.3, 0.2, 0.35, 0.5, 0.3, 0.1, 0]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 16 08:50:54 2018
@author: nsde
"""
#%% Packages to use
from __future__ import print_function
import tensorflow as tf
import os
import numpy as np
import datetime
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
from sklearn.decomposition import PCA
from dlmnn.helper.tf_funcs import tf_makePairwiseFunc
from dlmnn.helper.utility import get_optimizer, progressBar
from dlmnn.helper.neighbor_funcs import _weight_func as weight_func
from dlmnn.helper.logger import stat_logger
#%% Main Class
class lmnn(object):
def __init__(self, tf_transformer, margin=1, session=None, dir_loc=None,
             optimizer='adam', verbose=1):
    '''Set up an LMNN trainer around a parametrized transformation.

    Arguments:
        tf_transformer:
            callable, X_trans = tf_transformer(X).  Its trainable parameters
            are what the LMNN training optimizes.
        margin:
            margin threshold of the algorithm; sets the scale of the
            transformed feature space.
        session:
            tensorflow session to run computations in; a new one is opened
            when None.
        dir_loc:
            directory for tensorboard files; defaults to a folder named
            after the transformer.
        optimizer:
            str, name of the optimizer used for training.
        verbose:
            integer in [0, 2], controls the amount of output.
    '''
    # Session and tensorboard bookkeeping
    self.trans_name = tf_transformer.__name__
    if session is None:
        session = tf.Session()
    self.session = session
    if dir_loc is None:
        self.dir_loc = self.trans_name
    else:
        self.dir_loc = dir_loc + '/' + self.trans_name
    self.train_writer = None
    self.val_writer = None

    # Training configuration
    self.optimizer = get_optimizer(optimizer)
    self.verbose = verbose
    self.margin = margin

    # The transformer itself and a pairwise distance function derived from it
    self.transformer = tf_transformer
    self.metric_func = tf_makePairwiseFunc(tf_transformer)
def tf_findImposters(self, X, y, tN, margin=None):
    ''' Function for finding imposters in LMNN
    For a set of observations X and that sets target neighbours in tN,
    find all points that violate the following two equations
        D(i, imposter) <= D(i, target_neighbour) + margin,
        y(imposter) != y(target_neighbour)
    where D is the pairwise distance under self.metric_func.
    Arguments:
        X: N x ? matrix or tensor of data
        y: N x 1 vector, with class labels
        tN: (N*k) x 2 matrix, where the first column in each row is the
            observation index and the second column is the index of one
            of the k target neighbours
        margin: scalar margin; defaults to self.margin when None
    Output:
        tup: M x 3, where M is the number of triplets that where found to
            fullfill the imposter equation. First column in each row is the
            observation index, second column is the target neighbour index
            and the third column is the imposter index
    '''
    with tf.name_scope('findImposters'):
        margin = self.margin if margin is None else margin
        N = tf.shape(X)[0]
        n_tN = tf.shape(tN)[0]
        # Calculate distance
        D = self.metric_func(X, X) # N x N pairwise distances in the transformed space
        # Create all combination of [points, targetneighbours, imposters]:
        # every target-neighbour pair is tiled against every candidate imposter
        possible_imp_array = tf.expand_dims(tf.reshape(
                tf.ones((n_tN, N), tf.int32)*tf.range(N), (-1, )), 1)
        tN_tiled = tf.reshape(tf.tile(tN, [1, N]), (-1, 2))
        full_idx = tf.concat([tN_tiled, possible_imp_array], axis=1)
        # Find distances for all combinations:
        # columns 0:2 = (observation, target neighbour); columns 0,2 = (observation, imposter)
        tn_index = full_idx[:,:2]
        im_index = full_idx[:,::2]
        D_tn = tf.gather_nd(D, tn_index)
        D_im = tf.gather_nd(D, im_index)
        # Find actually imposter by evaluating equation:
        # close enough in distance AND of a different class
        y = tf.cast(y, tf.float32) # tf.gather do not support first input.dtype=int32 on GPU
        cond = tf.logical_and(D_im <= margin + D_tn, tf.logical_not(tf.equal(
               tf.gather(y,tn_index[:,1]),tf.gather(y,im_index[:,1]))))
        full_idx = tf.cast(full_idx, tf.float32) # tf.gather do not support first input.dtype=int32 on GPU
        tup = tf.boolean_mask(full_idx, cond)
        tup = tf.cast(tup, tf.int32) # tf.gather do not support first input.dtype=int32 on GPU
        return tup
def tf_LMNN_loss(self, X, y, tN, tup, mu, margin=None):
    ''' Calculates the LMNN loss (eq. 13 in paper)
    Arguments:
        X: N x ? matrix or tensor of data
        y: N x 1 vector, with class labels
        tN: (N*k) x 2 matrix, with (observation, target neighbour) index pairs
        tup: ? x 3 matrix of imposter triplets found by tf_findImposters.
            First column in each row is the observation index, second column
            is the target neighbour index and the third column is the
            imposter index
        mu: scalar, weighting coefficient between the push and pull term
        margin: scalar, margin for the algorithm; defaults to self.margin
    Output:
        loss: scalar, the LMNN loss
        D_pull: ? x 1 vector, with pull distance terms
        D_tn: ? x 1 vector, with the first push distance terms
        D_im: ? x 1 vector, with the second push distance terms
    '''
    with tf.name_scope('LMNN_loss'):
        margin = self.margin if margin is None else margin
        # Calculate distance
        D = self.metric_func(X, X) # N x N
        # Gather relevant distances:
        # tN pairs for the pull term; triplet slices for the push term
        # (tup[:,:2] = (obs, target), tup[:,::2] = (obs, imposter))
        D_pull = tf.gather_nd(D, tN)
        D_tn = tf.gather_nd(D, tup[:,:2])
        D_im = tf.gather_nd(D, tup[:,::2])
        # Calculate pull and push loss
        pull_loss = tf.reduce_sum(D_pull)
        push_loss = tf.reduce_sum(margin + D_tn - D_im)
        # Total loss: (1-mu) weights the pull term, mu weights the push term
        loss = (1-mu) * pull_loss + mu * push_loss
        return loss, D_pull, D_tn, D_im
def fit(self, Xtrain, ytrain, k, mu=0.5, maxEpoch=100, learning_rate=1e-4,
batch_size=50, val_set=None, run_id = None, snapshot=10):
""" Function for training the LMNN algorithm
Arguments:
Xtrain: Tensor of data [N, ?]
ytrain: Vector of labels [N, ]
k: integer, number of target neighbours
mu: float, in interval [0, 1]. Weighting between push and pull term
maxEpoch: integer, maximum number of iterations to train
learning_rate: float>0, learning rate for optimizer
batch_size: integer, number of samples to evaluate in each step
val_set: tuple, with two elements with same format as Xtrain, ytrain
run_id: str, name of the folder where results are stored
snapshot: integer, determining how often the accuracy should be
evaluated
"""
# Tensorboard file writers
run_id = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M') if run_id \
is None else run_id
loc = self.dir_loc + '/' + run_id
if not os.path.exists(self.dir_loc): os.makedirs(self.dir_loc)
if self.verbose == 2:
self.train_writer = tf.summary.FileWriter(loc + '/train')
# Check for validation set
validation = False
if val_set:
validation = True
Xval, yval = val_set
if self.verbose == 2:
self.val_writer = tf.summary.FileWriter(loc + '/val')
# Training parameters
Xtrain = Xtrain.astype('float32')
ytrain = ytrain.astype('int32')
N_train = Xtrain.shape[0]
n_batch_train = int(np.ceil(N_train / batch_size))
print(50*'-')
print('Number of training samples: ', N_train)
if validation:
Xval = Xval.astype('float32')
yval = yval.astype('int32')
N_val = Xval.shape[0]
n_batch_val = int(np.ceil(N_val / batch_size))
print('Number of validation samples: ', N_val)
print(50*'-')
# Target neighbours
tN = self.findTargetNeighbours(Xtrain, ytrain, k, name='Training')
if validation:
tN_val = self.findTargetNeighbours(Xval, yval, k, name='Validation')
# Placeholders for data
global_step = tf.Variable(0, trainable=False)
Xp = tf.placeholder(tf.float32, shape=(None, *Xtrain.shape[1:]), name='In_features')
yp = tf.placeholder(tf.int32, shape=(None,), name='In_targets')
tNp = tf.placeholder(tf.int32, shape=(None, 2), name='In_targetNeighbours')
# Imposters
tup = self.tf_findImposters(Xp, yp, tNp)
# Loss func and individual distance terms
LMNN_loss, D_1, D_2, D_3 = self.tf_LMNN_loss(Xp, yp, tNp, tup, mu)
# Optimizer
optimizer = self.optimizer(learning_rate = learning_rate)
trainer = optimizer.minimize(LMNN_loss, global_step=global_step)
# Summaries
n_tup = tf.shape(tup)[0]
true_imp = tf.cast(tf.less(D_3, D_2), tf.float32)
tf.summary.scalar('Loss', LMNN_loss)
tf.summary.scalar('Num_imp', n_tup)
tf.summary.scalar('Loss_pull', tf.reduce_sum(D_1))
tf.summary.scalar('Loss_push', tf.reduce_sum(self.margin + D_2 - D_3))
tf.summary.histogram('Rel_push_dist', D_3 / (D_2 + self.margin))
tf.summary.scalar('True_imp', tf.reduce_sum(true_imp))
tf.summary.scalar('Frac_true_imp', tf.reduce_mean(true_imp))
merged = tf.summary.merge_all()
# Initilize
init = tf.global_variables_initializer()
self.session.run(init)
if self.verbose==2: self.train_writer.add_graph(self.session.graph)
# Training
stats = stat_logger(maxEpoch, n_batch_train, verbose=self.verbose)
stats.on_train_begin() # Start training
for e in range(maxEpoch):
stats.on_epoch_begin() # Start epoch
# Permute target neighbours
tN = np.random.permutation(tN)
# Do backpropagation
for b in range(n_batch_train):
stats.on_batch_begin() # Start batch
# Sample target neighbours and extract data from these
tN_batch = tN[k*batch_size*b:k*batch_size*(b+1)]
idx, inv_idx = | np.unique(tN_batch, return_inverse=True) | numpy.unique |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 29 13:35:12 2017
@author: yu
"""
import os
import numpy as np
import scipy.linalg as linalg
import cv2
import operator
import matplotlib.pyplot as plt
def ComputeNorm(x):
    # function r=ComputeNorm(x)
    # computes vector norms of x
    # x: d x m matrix, each column a vector
    # r: 1 x m matrix, each entry the corresponding L2 norm
    #
    # Vectorized: one call to scipy.linalg.norm over axis 0 replaces the
    # per-column Python loop (same values, same (1, m) output shape).
    r = linalg.norm(x, axis=0)[np.newaxis, :]
    return r
def myLDA(A,Labels):
# function [W,m]=myLDA(A,Label)
# computes LDA of matrix A
# A: D by N data matrix. Each column is a random vector
# W: D by K matrix whose columns are the principal components in decreasing order
# m: mean of each projection
classLabels = np.unique(Labels)
classNum = len(classLabels)
dim,datanum = A.shape
totalMean = np.mean(A,1)
partition = [np.where(Labels==label)[0] for label in classLabels]
classMean = [(np.mean(A[:,idx],1),len(idx)) for idx in partition]
#compute the within-class scatter matrix
W = | np.zeros((dim,dim)) | numpy.zeros |
from __future__ import print_function
import string
import sys
import os
from collections import deque
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import tensorflow as tf
import keras
keras.backend.image_data_format()
from keras import backend as K
from keras import regularizers
from keras.layers import Input, Dense, Reshape, Lambda, Conv1D, Flatten, MaxPooling1D, UpSampling1D, GlobalMaxPooling1D
from keras.layers import LSTM, Bidirectional, BatchNormalization, Dropout, Concatenate, Embedding, Activation, Dot, dot
from keras.models import Model, clone_model, Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.constraints import unitnorm
from keras_layer_normalization import LayerNormalization
tf.keras.backend.set_floatx('float32')
import sklearn as sk
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import LocallyLinearEmbedding, MDS, Isomap, TSNE
from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA, SparsePCA, TruncatedSVD, FastICA, NMF, MiniBatchDictionaryLearning
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold, GroupKFold, train_test_split
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score
from sklearn.metrics import average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.metrics import roc_curve, precision_recall_curve, RocCurveDisplay, PrecisionRecallDisplay
from sklearn.metrics import roc_auc_score,accuracy_score,matthews_corrcoef
from scipy import stats
from scipy.stats import multivariate_normal, kurtosis, skew, pearsonr, spearmanr
import processSeq
from processSeq import load_seq_1, kmer_dict, load_signal_1, load_seq_2, load_seq_2_kmer, load_seq_altfeature
import xgboost
import pickle
import os.path
from optparse import OptionParser
import time
from timeit import default_timer as timer
import utility_1
from utility_1 import mapping_Idx
import h5py
import json
# generate sequences
# idx_sel_list: chrom, serial
# seq_list: relative positions
def generate_sequences(idx_sel_list, gap_tol=5, region_list=[]):
	"""Group genomic bins into contiguous sequences per chromosome.

	idx_sel_list : (n, 2) array with columns (chrom, serial).
	gap_tol : a jump in serial larger than this starts a new sequence.
	region_list : optional array whose rows start with (chrom, serial);
		break points matching these serials are NOT used to split.
	Returns an (m, 2) array of (start, stop) row-index pairs into
	idx_sel_list (stop inclusive), one pair per contiguous sequence.
	NOTE(review): region_list has a mutable default; it is only read here,
	so this is harmless, but confirm callers never mutate it.
	"""
	chrom = idx_sel_list[:,0]
	chrom_vec = np.unique(chrom)
	chrom_vec = np.sort(chrom_vec)
	seq_list = []
	print(len(chrom),chrom_vec)
	for chrom_id in chrom_vec:
		b1 = np.where(chrom==chrom_id)[0]
		t_serial = idx_sel_list[b1,1]
		prev_serial = t_serial[0:-1]
		next_serial = t_serial[1:]
		distance = next_serial-prev_serial
		# positions (within this chromosome) where the serial gap exceeds gap_tol
		b2 = np.where(distance>gap_tol)[0]
		if len(b2)>0:
			if len(region_list)>0:
				b_1 = np.where(region_list[:,0]==chrom_id)[0]
				# NOTE(review): b2 indexes rows WITHIN this chromosome; using it
				# directly on idx_sel_list looks like it should be
				# idx_sel_list[b1[b2],1] — confirm against callers.
				t_serial = idx_sel_list[b2,1]
				if len(b_1)>0:
					# drop break points whose serials appear in region_list
					t_id1 = utility_1.mapping_Idx(t_serial,region_list[b_1,1])
					t_id1 = t_id1[t_id1>=0]
					t_id2 = b2[t_id1]
					b2 = np.setdiff1d(b2,t_id2)
		if len(b2)>0:
			# turn break positions into consecutive (start, stop) segments
			t_seq = list(np.vstack((b2[0:-1]+1,b2[1:])).T)
			t_seq.insert(0,np.asarray([0,b2[0]]))
			t_seq.append(np.asarray([b2[-1]+1,len(b1)-1]))
		else:
			# no gaps: the whole chromosome is one sequence
			t_seq = [np.asarray([0,len(b1)-1])]
		# map local segment indices back to global row indices
		seq_list.extend(b1[np.asarray(t_seq)])
	return np.asarray(seq_list)
# select sample
def sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=5, L=5):
	"""Build context-window feature and signal matrices for each sample.

	For every position inside each contiguous sequence of seq_list, collect
	the features/signals of its 2*L+1 neighbours (clipped at the sequence
	boundaries) so downstream models can use local context.

	Parameters
	----------
	x_mtx : (num_sample, feature_dim) per-position features.
	y : (num_sample,) per-position signal values.
	idx_sel_list : (num_sample, 2) array, columns (chrom, serial).
	seq_list : iterable of (start, stop) row-index pairs (stop inclusive),
		e.g. the output of generate_sequences().
	tol : unused; kept for interface compatibility.
	L : half window size; context window is 2*L+1 positions.

	Returns
	-------
	(feature_mtx, signal_mtx, vec1_serial, vec1_local), or -1 if the
	local-index consistency check fails.
	"""
	num_sample = len(idx_sel_list)
	num1 = len(seq_list)
	size1 = 2*L+1
	print(num_sample,num1,size1)
	feature_dim = x_mtx.shape[1]
	vec1_local = np.zeros((num_sample,size1),dtype=int)
	vec1_serial = np.zeros((num_sample,size1),dtype=int)
	feature_mtx = np.zeros((num_sample,size1,feature_dim),dtype=np.float32)
	signal_mtx = np.zeros((num_sample,size1))
	ref_serial = idx_sel_list[:,1]
	# marks which rows are covered by at least one sequence
	id_vec = np.zeros(num_sample,dtype=np.int8)
	for i in range(0,num1):
		s1, s2 = seq_list[i][0], seq_list[i][1]+1
		serial = ref_serial[s1:s2]
		id_vec[s1:s2] = 1
		num2 = len(serial)
		# t2: for each position, row indices of its 2*L+1 neighbours,
		# clipped to the current sequence's range [s1, s2)
		t1 = np.outer(list(range(s1,s2)),np.ones(size1))
		t2 = t1 + np.outer(np.ones(num2),list(range(-L,L+1)))
		t2[t2<s1] = s1
		t2[t2>=s2] = s2-1
		idx = np.int64(t2)
		vec1_local[s1:s2] = idx
		vec1_serial[s1:s2] = ref_serial[idx]
		feature_mtx[s1:s2] = x_mtx[idx]
		signal_mtx[s1:s2] = y[idx]
	id1 = np.where(id_vec>0)[0]
	num2 = len(id1)
	if num2<num_sample:
		# some rows were not covered by any sequence: keep only covered rows
		feature_mtx, signal_mtx = feature_mtx[id1], signal_mtx[id1]
		vec1_serial = vec1_serial[id1]
		# remap global row indices into the compacted index space
		# (bug fix: this previously used the undefined name `sample_num`,
		# raising NameError whenever any row was uncovered)
		id_1 = -np.ones(num_sample,dtype=np.int64)
		id_1[id1] = np.arange(num2)
		# NOTE(review): vec1_local is remapped but NOT sliced to id1, so its
		# length can differ from feature_mtx — confirm callers expect this.
		vec1_local = id_1[vec1_local]
		b1 = np.where(vec1_local<0)[0]
		if len(b1)>0:
			print('error!',b1)
			return -1
	# add trailing singleton dimension for model input
	signal_mtx = np.expand_dims(signal_mtx, axis=-1)
	return feature_mtx, signal_mtx, vec1_serial, vec1_local
def score_2a(y, y_predicted):
	"""Compute a panel of regression evaluation metrics.

	Returns the list
	[MSE, Pearson r, Pearson p-value, explained variance, MAE,
	 median absolute error, R^2, Spearman rho, Spearman p-value].
	"""
	pearson_r, pearson_p = pearsonr(y, y_predicted)
	spearman_r, spearman_p = spearmanr(y, y_predicted)
	metrics = [
		mean_squared_error(y, y_predicted),
		pearson_r,
		pearson_p,
		explained_variance_score(y, y_predicted),
		mean_absolute_error(y, y_predicted),
		median_absolute_error(y, y_predicted),
		r2_score(y, y_predicted),
		spearman_r,
		spearman_p,
	]
	return metrics
def read_phyloP(species_name):
	"""Summarize phyloP conservation scores per region for one species.

	Reads region coordinates from ./estimate_rt/estimate_rt_<species>.txt and
	per-base phyloP bedGraph tracks, then for each region computes a
	16-column vector: fractions of region length falling in 12 score bins of
	width 2.5 (from -20 to 10), plus weighted mean, median, max and min.
	Writes phyloP_<chrom>.txt per chromosome and returns the last vec1.
	NOTE(review): chrom_vec is hard-coded to ['chr22'], and the loop breaks
	after 1000 processed regions — both look like debugging leftovers.
	"""
	path1 = './'
	filename1 = '%s/estimate_rt/estimate_rt_%s.txt'%(path1,species_name)
	# filename2a = 'test_seq_%s.1.txt'%(species_name)
	file1 = pd.read_csv(filename1,sep='\t')
	col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
	chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
	num_sample = len(chrom_ori)
	chrom_vec = np.unique(chrom_ori)
	chrom_vec = ['chr22']
	for chrom_id in chrom_vec:
		filename1 = '%s/phyloP/hg19.phyloP100way.%s.bedGraph'%(path1,chrom_id)
		data1 = pd.read_csv(filename1,header=None,sep='\t')
		chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
		len1 = stop-start
		b = np.where(chrom_ori==chrom_id)[0]
		num_sample1 = len(b)
		vec1 = np.zeros((num_sample1,16))
		print(chrom_id,len(chrom),len(b))
		cnt = 0
		b1 = [-1]
		for i in b:
			# resume scanning the bedGraph from where the previous region ended
			t1 = b1[-1]+1
			b1 = np.where((start[t1:]>=start_ori[i])&(stop[t1:]<stop_ori[i]))[0]+t1
			if len(b1)==0:
				b1 = [-1]
				continue
			t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
			s1 = 0
			s2 = np.sum(t_len1)
			i1 = cnt
			# histogram the scores into 12 bins of width 2.5, weighted by length
			for j in range(0,12):
				temp1 = (j-8)*2.5
				b2 = np.where((t_score<temp1+2.5)&(t_score>=temp1))[0]
				print(b2)
				vec1[i1,j] = np.sum(t_len1[b2])*1.0/s2
				s1 = s1+temp1*vec1[i1,j]
			vec1[i1,12] = s1 # average
			vec1[i1,13] = np.median(t_score)
			vec1[i1,14] = np.max(t_score)
			vec1[i1,15] = np.min(t_score)
			cnt += 1
			if cnt%1000==0:
				print(cnt,len(b1),s2,vec1[i1,12:16])
				break
		# dict1 = dict()
		# dict1['vec'], dict1['index'] = vec1,b
		# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
		fields = ['index']
		for j in range(0,12):
			temp1 = (j-8)*2.5
			fields.append('%s-%s'%(temp1,temp1+2.5))
		fields.extend(range(0,4))
		data1 = pd.DataFrame(data = np.hstack((b[:,np.newaxis],vec1)),columns=fields)
		data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
	return vec1
def read_phyloP_1(ref_filename,header,file_path,chrom_vec,n_level=15,offset=10,magnitude=2):
	"""Summarize phyloP scores per region with configurable score binning.

	Like read_phyloP, but the reference table, bedGraph directory and the
	binning (n_level bins of width `magnitude`, shifted by `offset`) are
	parameters. For each region: n_level length-weighted bin fractions plus
	[weighted mean, median, max, min]. Writes phyloP_<chrom>.txt per
	chromosome and returns the last chromosome's matrix.
	"""
	file1 = pd.read_csv(ref_filename,header=header,sep='\t')
	colnames = list(file1)
	col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
	chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
	num_sample = len(chrom_ori)
	# scores above score_max are clipped just below it so they land in the top bin
	score_max = (n_level-offset)*magnitude
	for chrom_id in chrom_vec:
		filename1 = '%s/chr%s.phyloP100way.bedGraph'%(file_path,chrom_id)
		data1 = pd.read_csv(filename1,header=None,sep='\t')
		chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
		len1 = stop-start
		chrom_id1 = 'chr%s'%(chrom_id)
		b = np.where(chrom_ori==chrom_id1)[0]
		num_sample1 = len(b)
		vec1 = np.zeros((num_sample1,n_level+4))
		print(chrom_id,len(chrom),len(b))
		cnt = 0
		m_idx = len(start)-1
		start_idx = 0
		print("number of regions", len(b))
		for i in b:
			t_start, t_stop = start_ori[i], stop_ori[i] # position of zero region
			position = [t_start,t_stop]
			# incremental scan: search_region_include resumes from start_idx
			if start_idx<=m_idx:
				b1, start_idx = utility_1.search_region_include(position, start, stop, m_idx, start_idx)
			if len(b1)==0:
				continue
			t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
			t_score[t_score>score_max] = score_max-1e-04
			s1 = 0
			s2 = np.sum(t_len1)
			for j in range(0,n_level):
				temp1 = (j-offset)*magnitude
				b2 = np.where((t_score<temp1+magnitude)&(t_score>=temp1))[0]
				vec1[cnt,j] = np.sum(t_len1[b2])*1.0/s2
				s1 = s1+temp1*vec1[cnt,j]
			vec1[cnt,n_level:n_level+4] = [s1,np.median(t_score),np.max(t_score),np.min(t_score)]
			cnt += 1
			pre_b1 = b1
			# NOTE(review): cnt was already incremented, so vec1[cnt,-4:] below
			# prints the NEXT (still-zero) row, not the one just filled — confirm.
			if cnt%1000==0:
				print(chrom_id,cnt,len(b1),s2,vec1[cnt,-4:])
		fields = ['index']
		for j in range(0,n_level):
			temp1 = (j-offset)*magnitude
			fields.append('%s-%s'%(temp1,temp1+magnitude))
		fields.extend(range(0,4))
		idx = serial_ori[b]
		data1 = pd.DataFrame(data = np.hstack((idx[:,np.newaxis],vec1)),columns=fields)
		data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
	return vec1
def read_motif_1(filename,output_filename=-1):
	"""Load a motif-count table and normalize counts to per-kilobase rates.

	The input is tab-separated; the first three columns are chrom/start/stop
	and the remaining columns are motif counts per region. Counts are scaled
	by 1000/region_length. If output_filename is given (not -1), the
	normalized table is also written back to disk.

	Returns (mtx1, chrom, start, stop, colnames).
	"""
	table = pd.read_csv(filename,sep='\t')
	colnames = list(table)
	chrom = np.asarray(table[colnames[0]])
	start = np.asarray(table[colnames[1]])
	stop = np.asarray(table[colnames[2]])
	region_len = stop-start
	m1, m2, median_len = np.max(region_len), np.min(region_len), np.median(region_len)
	# report regions whose length deviates from the median
	b1 = np.where(region_len!=median_len)[0]
	print(m1,m2,median_len,len(b1))
	bin_size = median_len
	motif_name = colnames[3:]
	# counts -> rate per 1 kb of region length
	mtx1 = np.asarray(table.loc[:,motif_name])
	mtx1 = mtx1*1000.0/np.outer(region_len,np.ones(mtx1.shape[1]))
	print('motif',len(motif_name))
	print(mtx1.shape)
	print(np.max(mtx1),np.min(mtx1),np.median(mtx1))
	if output_filename!=-1:
		out = pd.DataFrame(columns=colnames)
		out[colnames[0]], out[colnames[1]], out[colnames[2]] = chrom, start, stop
		for pos, name in enumerate(motif_name):
			out[name] = mtx1[:,pos]
		out.to_csv(output_filename,header=True,index=False,sep='\t')
		print(output_filename, out.shape)
	return mtx1, chrom, start, stop, colnames
def read_gc_1(ref_filename,header,filename,output_filename):
	"""Compute GC-related sequence features and save them with region info.

	Reads region coordinates/serials from ref_filename and sequence-derived
	features (GC, GC_N, GC_skew) from `filename` via load_seq_altfeature,
	then writes one row per region to output_filename (tab-separated).

	Returns the feature matrix produced by load_seq_altfeature.
	"""
	sel_idx = []
	ref_table = pd.read_csv(ref_filename,header=header,sep='\t')
	f_list = load_seq_altfeature(filename,sel_idx)
	cols = list(ref_table)
	chrom_ori = np.asarray(ref_table[cols[0]])
	start_ori = np.asarray(ref_table[cols[1]])
	stop_ori = np.asarray(ref_table[cols[2]])
	serial_ori = np.asarray(ref_table[cols[3]])
	num_sample = len(chrom_ori)
	# reference regions and computed features must align one-to-one
	if num_sample!=f_list.shape[0]:
		print('error!',num_sample,f_list.shape[0])
	fields = ['chrom','start','stop','serial','GC','GC_N','GC_skew']
	out = pd.DataFrame(columns=fields)
	out['chrom'], out['start'], out['stop'], out['serial'] = chrom_ori, start_ori, stop_ori, serial_ori
	for i in range(0,3):
		out[fields[i+4]] = f_list[:,i]
	out.to_csv(output_filename,index=False,sep='\t')
	return f_list
def generate_serial(filename1,chrom,start,stop):
	"""Assign a genome-wide serial (bin index) to each region.

	filename1 : chromosome-sizes file (tab-separated: name, size; no header).
	chrom/start/stop : per-region coordinate arrays. The bin size is taken
	from stop[1]-start[1]; serials increase across chromosomes in the order
	chr1..chr22, chrX, chrY.

	Returns an int64 array of serials, or None if a chromosome is missing
	from the sizes file.
	"""
	chrom_vec = []
	for i in range(1,23):
		chrom_vec.append('chr%d'%(i))
	chrom_vec += ['chrX']
	chrom_vec += ['chrY']
	print(chrom_vec)
	print(len(chrom))
	data1 = pd.read_csv(filename1,header=None,sep='\t')
	ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
	serial_start = 0
	serial_vec = np.zeros(len(chrom))
	bin_size = stop[1]-start[1]
	print(bin_size)
	for chrom_id in chrom_vec:
		b1 = np.where(ref_chrom==chrom_id)[0]
		# bug fix: test for a missing chromosome BEFORE indexing b1[0];
		# previously an absent chromosome raised IndexError instead of
		# reaching this error branch
		if len(b1)==0:
			print("error!")
			return
		t_size = chrom_size[b1[0]]
		b2 = np.where(chrom==chrom_id)[0]
		# number of bins this chromosome occupies
		size1 = int(np.ceil(t_size*1.0/bin_size))
		serial = np.int64(start[b2]/bin_size)+serial_start
		serial_vec[b2] = serial
		print(chrom_id,b2,len(serial),serial_start,size1)
		serial_start = serial_start+size1
	return np.int64(serial_vec)
def generate_serial_local(filename1,chrom,start,stop,chrom_num):
	"""Assign serials like generate_serial, for a genome with chrom_num autosomes.

	Per-chromosome offsets are precomputed (chr1..chrN, chrX, chrY, chrM)
	from the sizes file `filename1`; each region's serial is its bin index
	within its chromosome plus that chromosome's offset.

	Returns an int64 array of serials, or None if a chromosome is missing
	from the sizes file.
	"""
	chrom_vec = []
	for i in range(1,chrom_num+1):
		chrom_vec.append('chr%d'%(i))
	chrom_vec += ['chrX']
	chrom_vec += ['chrY']
	chrom_vec += ['chrM']
	print(chrom_vec)
	print(chrom)
	print(len(chrom))
	t_chrom = np.unique(chrom)
	data1 = pd.read_csv(filename1,header=None,sep='\t')
	ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
	serial_start = 0
	serial_start_1 = dict()
	serial_vec = np.zeros(len(chrom))
	bin_size = stop[1]-start[1]
	print(bin_size)
	# first pass: cumulative bin offset of each chromosome
	for chrom_id in chrom_vec:
		b1 = np.where(ref_chrom==chrom_id)[0]
		# bug fix: guard against a chromosome missing from the sizes file;
		# previously chrom_size[b1[0]] raised IndexError in that case
		if len(b1)==0:
			print("error!")
			return
		t_size = chrom_size[b1[0]]
		serial_start_1[chrom_id] = serial_start
		size1 = int(np.ceil(t_size*1.0/bin_size))
		serial_start = serial_start+size1
	# second pass: serial = within-chromosome bin index + chromosome offset
	for chrom_id in t_chrom:
		b2 = np.where(chrom==chrom_id)
		serial = np.int64(start[b2]/bin_size)+serial_start_1[chrom_id]
		serial_vec[b2] = serial
	return np.int64(serial_vec)
def generate_serial_start(filename1,chrom,start,stop,chrom_num=19):
	"""Assign genome-wide serials and return per-chromosome start offsets.

	Like generate_serial, but also returns a dict mapping each chromosome
	name (chr1..chrN, chrX, chrY) to the serial at which it begins.
	Regions on chromosomes outside that set keep serial -1.

	Returns (int64 serial array, start_vec dict), or None if a chromosome
	is missing from the sizes file.
	"""
	chrom_vec = []
	for i in range(1,chrom_num+1):
		chrom_vec.append('chr%d'%(i))
	chrom_vec += ['chrX']
	chrom_vec += ['chrY']
	print(chrom_vec)
	print(chrom)
	print(len(chrom))
	data1 = pd.read_csv(filename1,header=None,sep='\t')
	ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
	serial_start = 0
	# -1 marks regions whose chromosome is not in chrom_vec
	serial_vec = -np.ones(len(chrom))
	bin_size = stop[1]-start[1]
	print(bin_size)
	start_vec = dict()
	for chrom_id in chrom_vec:
		start_vec[chrom_id] = serial_start
		b1 = np.where(ref_chrom==chrom_id)[0]
		# bug fix: check for a missing chromosome BEFORE indexing b1[0];
		# previously this raised IndexError instead of reaching the
		# error branch below
		if len(b1)==0:
			print("error!")
			return
		t_size = chrom_size[b1[0]]
		b2 = np.where(chrom==chrom_id)[0]
		size1 = int(np.ceil(t_size*1.0/bin_size))
		serial = np.int64(start[b2]/bin_size)+serial_start
		serial_vec[b2] = serial
		print(chrom_id,b2,len(serial),serial_start,size1)
		serial_start = serial_start+size1
	return np.int64(serial_vec), start_vec
def shuffle_array(vec):
	"""Randomly permute *vec*; return (shuffled array, permutation indices)."""
	order = np.random.permutation(len(vec))
	return vec[order], order
# input: estimated attention, type_id: training, validation, or test data
# output: ranking of attention
def select_region1_sub(filename,type_id):
	"""Attach attention-ranking columns to an estimation table.

	Reads a tab-separated table with at least the columns
	chrom/start/serial/predicted_attention and appends:
	Q1 - attention rank (quantile) over all rows,
	Q2 - attention rank within each chromosome,
	typeId - the given type_id (training: 0, test: 1, valid: 2).

	Returns (annotated DataFrame, array of chromosome names present).
	"""
	table = pd.read_csv(filename,sep='\t')
	colnames = list(table)
	# chrom start stop serial signal predicted_signal predicted_attention
	chrom = np.asarray(table['chrom'])
	start = np.asarray(table['start'])
	serial = np.asarray(table['serial'])
	attention = np.asarray(table['predicted_attention'])
	n = len(attention)
	# global quantile rank over all included chromosomes
	rank_global = stats.rankdata(attention,'average')/n
	rank_local = np.zeros(n)
	chrom_vec = np.unique(chrom)
	for t_chrom in chrom_vec:
		members = np.where(chrom==t_chrom)[0]
		t_rank = stats.rankdata(attention[members],'average')
		rank_local[members] = t_rank/len(members)
	table['Q1'] = rank_global # rank across all the included chromosomes
	table['Q2'] = rank_local # rank by each chromosome
	table['typeId'] = np.int8(type_id*np.ones(n))
	return table,chrom_vec
# merge estimated attention from different training/test splits
# type_id1: chromosome order; type_id2: training: 0, test: 1, valid: 2
def select_region1_merge(filename_list,output_filename,type_id1=0,type_id2=1):
	"""Merge estimated-attention tables from several training/test splits.

	Each file is annotated via select_region1_sub (adding Q1/Q2/typeId with
	typeId=type_id2), deduplicated by serial (a serial seen in an earlier
	file wins), concatenated, sorted by serial and written to
	output_filename.
	type_id1: 0 processes files in the given order, 1 in reverse order.
	Returns (merged DataFrame, list of per-file chromosome arrays).
	"""
	list1 = []
	chrom_numList = []
	i = 0
	serial1 = []
	num1 = len(filename_list)
	vec1 = list(range(num1))
	if type_id1==1:
		vec1 = list(range(num1-1,-1,-1))
	for i in vec1:
		filename1 = filename_list[i]
		# data1: chrom, start, stop, serial, signal, predicted_signal, predicted_attention, Q1, Q2, typeId
		# typeId: training: 0, test: 1, valid: 2
		data1, chrom_vec = select_region1_sub(filename1,type_id2)
		print(filename1,len(data1))
		# keep only serials not already contributed by earlier files
		t_serial = np.asarray(data1['serial'],dtype=np.int64)
		t_serial2 = np.setdiff1d(t_serial,serial1)
		serial1 = np.union1d(serial1,t_serial)
		id1 = mapping_Idx(t_serial,t_serial2)
		colnames = list(data1)
		data1 = data1.loc[id1,colnames]
		list1.append(data1)
		chrom_numList.append(chrom_vec)
	data2 = pd.concat(list1, axis=0, join='outer', ignore_index=True,
					keys=None, levels=None, names=None, verify_integrity=False, copy=True)
	print('sort')
	data2 = data2.sort_values(by=['serial'])
	data2.to_csv(output_filename,index=False,sep='\t')
	return data2, chrom_numList
class Reader(object):
	"""Attaches genome-wide serial indices to region/motif tables.

	Wraps generate_serial_start() and read_motif_1() to annotate coordinate
	or motif-count tables with a 'serial' column derived from a
	chromosome-sizes file.
	"""
	def __init__(self, ref_filename, feature_idvec = [1,1,1,1]):
		# Initializes RepliSeq
		# ref_filename: reference (chromosome sizes / serial) file path
		# feature_idvec: feature-group selector flags; semantics defined by callers
		self.ref_filename = ref_filename
		self.feature_idvec = feature_idvec
	def generate_serial(self,filename1,filename2,output_filename,header=None):
		"""Add serials to the regions of filename2; optionally save the table.

		filename1: chromosome-sizes file; filename2: region table whose
		first three columns are chrom/start/stop.
		Returns (serial_vec, start_vec) as produced by generate_serial_start.
		"""
		data1 = pd.read_csv(filename2, header=header, sep='\t')
		colnames = list(data1)
		chrom, start, stop = np.asarray(data1[colnames[0]]), np.asarray(data1[colnames[1]]), np.asarray(data1[colnames[2]])
		serial_vec, start_vec = generate_serial_start(filename1,chrom,start,stop)
		if output_filename!=None:
			# insert the serial column after chrom/start/stop
			colnames2 = colnames[0:3]+['serial']+colnames[3:]
			data2 = pd.DataFrame(columns=colnames2)
			data2['serial'] = serial_vec
			for colname1 in colnames:
				data2[colname1] = data1[colname1]
			flag = False
			if header!=None:
				flag = True
			data2.to_csv(output_filename,header=flag,index=False,sep='\t')
		return serial_vec, start_vec
	def load_motif(self,filename1,motif_filename,output_filename):
		"""Load motif counts, attach serials, optionally write merged table.

		Returns True on completion.
		"""
		# output_filename = None
		# ref_filename = 'hg38.5k.serial.bed'
		# motif_filename = 'hg38.motif.count.txt'
		# output_filename1 = None
		mtx1, chrom, start, stop, colnames = read_motif_1(motif_filename)
		serial_vec, start_vec = generate_serial_start(filename1,chrom,start,stop)
		if output_filename!=None:
			colnames2 = ['chrom','start','stop','serial']
			data2 = pd.DataFrame(columns=colnames2)
			data2['chrom'], data2['start'], data2['stop'], data2['serial'] = chrom, start, stop, serial_vec
			data3 = pd.DataFrame(columns=colnames[3:],data=mtx1)
			data1 = pd.concat([data2,data3], axis=1, join='outer', ignore_index=True,
						keys=None, levels=None, names=None, verify_integrity=False, copy=True)
			data1.to_csv(output_filename,header=True,index=False,sep='\t')
			print('data1',data1.shape)
		return True
class ConvergenceMonitor(object):
	"""Tracks log-probability across iterations and decides convergence.

	Convergence is declared once the maximum number of iterations is
	reached, or the improvement between the last two reports drops
	below ``tol``.
	"""
	_template = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
	def __init__(self, tol, n_iter, verbose):
		self.tol = tol                    # minimal improvement to keep iterating
		self.n_iter = n_iter              # hard cap on iterations
		self.verbose = verbose            # print one progress line per report
		self.history = deque(maxlen=2)    # last two reported log-probs
		self.iter = 0                     # number of reports so far
	def __repr__(self):
		name = self.__class__.__name__
		state = dict(vars(self), history=list(self.history))
		return "{0}({1})".format(
			name, _pprint(state, offset=len(name)))
	def report(self, logprob):
		"""Record one iteration's log-probability (optionally printing it)."""
		if self.verbose:
			delta = logprob - self.history[-1] if self.history else np.nan
			line = self._template.format(
				iter=self.iter + 1, logprob=logprob, delta=delta)
			print(line, file=sys.stderr)
		self.history.append(logprob)
		self.iter += 1
	@property
	def converged(self):
		"""True once n_iter reports were made or improvement fell below tol."""
		if self.iter == self.n_iter:
			return True
		return (len(self.history) == 2 and
				self.history[1] - self.history[0] < self.tol)
class _Base1(BaseEstimator):
def __init__(self, file_path, species_id, resolution, run_id, generate,
chromvec,test_chromvec,
featureid,type_id,cell,method,ftype,ftrans,tlist,
flanking,normalize,
config,
attention=1,feature_dim_motif=1,
kmer_size=[6,5]):
# Initializes RepliSeq
self.run_id = run_id
self.cell = cell
self.generate = generate
self.train_chromvec = chromvec
self.chromosome = chromvec[0]
print('train_chromvec',train_chromvec)
print('test_chromvec',test_chromvec)
self.test_chromvec = test_chromvec
self.config = config
self.n_epochs = config['n_epochs']
self.species_id = species_id
self.type_id = type_id
self.cell_type = cell
self.cell_type1 = config['celltype_id']
self.method = method
self.ftype = ftype
self.ftrans = ftrans[0]
self.ftrans1 = ftrans[1]
self.t_list = tlist
self.flanking = flanking
self.flanking1 = 3
self.normalize = normalize
self.batch_size = config['batch_size']
# config = dict(output_dim=hidden_unit,fc1_output_dim=fc1,fc2_output_dim=fc2,units1=units1[0],
# units2=units1[1],n_epochs=n_epochs,batch_size=batch_size)
# config['feature_dim_vec'] = units1[2:]
self.tol = config['tol']
self.attention = attention
self.attention_vec = [12,17,22,32,51,52,58,60]
self.attention_vec1 = [1]
self.lr = config['lr']
self.step = config['step']
self.feature_type = -1
self.kmer_size = kmer_size
self.activation = config['activation']
self.min_delta = config['min_delta']
self.chromvec_sel = chromvec
self.feature_dim_transform = config['feature_dim_transform']
feature_idvec = [1,1,1,1]
# ref_filename = 'hg38_5k_serial.bed'
if 'ref_filename' in config:
ref_filename = config['ref_filename']
else:
ref_filename = 'hg38_5k_serial.bed'
self.reader = Reader(ref_filename, feature_idvec)
self.predict_type_id = 0
self.method = method
self.train = self.config['train_mode']
self.path = file_path
self.model_path = '%s/test_%d.h5'%(self.path,run_id)
self.pos_code = config['pos_code']
self.feature_dim_select1 = config['feature_dim_select']
self.method_vec = [[11,31],[22,32,52,17,51,58,60],[56,62]]
self.resolution = resolution
# if self.species_id=='mm10':
# self.cell_type1 = config['cell_type1']
if 'cell_type1' in self.config:
self.cell_type1 = config['cell_type1']
if ('load_type' in self.config) and (self.config['load_type']==1):
self.load_type = 1
else:
self.load_type = 0
if (method>10) and not(method in [56]) :
self.predict_context = 1
else:
self.predict_context = 0
if ftype[0]==-5:
self.feature_idx1= -5 # full dimensions
elif ftype[0]==-6:
self.feature_idx1 = -6 # frequency dimensions
else:
self.feature_idx1 = ftype
if 'est_attention_type1' in self.config:
self.est_attention_type1 = self.config['est_attention_type1']
else:
self.est_attention_type1 = 1
if 'est_attention_sel1' in self.config:
self.est_attention_sel1 = self.config['est_attention_sel1']
else:
self.est_attention_sel1 = 0
# self.feature_idx = [0,2]
self.feature_idx = featureid
self.x, self.y = dict(), dict() # feature matrix and signals
self.vec = dict() # serial
self.vec_local = dict()
if self.species_id.find('hg')>=0:
self.chrom_num = 22
elif self.species_id.find('mm')>=0:
self.chrom_num = 19
else:
self.chrom_num = -1
self.region_list_test, self.region_list_train, self.region_list_valid = [],[],[]
if 'region_list_test' in config:
self.region_list_test = config['region_list_test']
if 'region_list_train' in config:
self.region_list_train = config['region_list_train']
if 'region_list_valid' in config:
self.region_list_valid = config['region_list_valid']
flag = False
if 'scale' in config:
flag = True
self.scale = config['scale']
else:
self.scale = [0,1]
if ('activation_basic' in config) and (config['activation_basic']=='tanh'):
if (flag==True) and (self.scale[0]>=0):
flag = False
if flag==False:
self.scale = [-1,1]
self.region_boundary = []
self.serial_vec = []
self.f_mtx = []
print('scale',self.scale)
print(self.test_chromvec)
filename1 = '%s_chr%s-chr%s_chr%s-chr%s'%(self.cell_type, self.train_chromvec[0], self.train_chromvec[-1], self.test_chromvec[0], self.test_chromvec[-1])
self.filename_load = filename1
print(self.filename_load,self.method,self.predict_context,self.attention)
self.set_generate(generate,filename1)
	def load_ref_serial(self, ref_filename, header=None):
		"""Load the reference (chrom, start, stop, serial) table.

		Stores the four columns on self.chrom_ori/start_ori/stop_ori/
		serial_ori and returns serial_ori.
		NOTE(review): header=None makes pandas treat the file as headerless;
		any other value falls back to pandas' default header inference —
		confirm this inversion is intended by callers.
		"""
		if header==None:
			file1 = pd.read_csv(ref_filename,header=header,sep='\t')
		else:
			file1 = pd.read_csv(ref_filename,sep='\t')
		colnames = list(file1)
		# first four columns are chrom, start, stop, serial
		col1, col2, col3, col_serial = colnames[0], colnames[1], colnames[2], colnames[3]
		self.chrom_ori, self.start_ori, self.stop_ori, self.serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col_serial])
		print('load ref serial', self.serial_ori.shape)
		return self.serial_ori
# load local serial and signal
	def load_local_serial(self, filename1, header=None, region_list=[], type_id2=1, signal_normalize=1,region_list_1=[]):
		"""Load the local (chrom, start, stop, serial[, signal]) table.

		Drops chrX/chrY/chrM rows, sorts by serial, optionally normalizes the
		signal column per chromosome, and optionally restricts rows to the
		given regions. Results are stored on self (chrom/start/stop/serial/
		signal, region_boundary); returns (serial, signal).
		NOTE(review): region_list/region_list_1 have mutable defaults; both
		are only read here.
		"""
		if header==None:
			file2 = pd.read_csv(filename1,header=header,sep='\t')
		else:
			file2 = pd.read_csv(filename1,sep='\t')
		colnames = list(file2)
		col1, col2, col3, col_serial = colnames[0], colnames[1], colnames[2], colnames[3]
		# sort the table by serial
		file2 = file2.sort_values(by=[col_serial])
		self.chrom, self.start, self.stop, self.serial = np.asarray(file2[col1]), np.asarray(file2[col2]), np.asarray(file2[col3]), np.asarray(file2[col_serial])
		# keep autosomes only
		b = np.where((self.chrom!='chrX')&(self.chrom!='chrY')&(self.chrom!='chrM'))[0]
		self.chrom, self.start, self.stop, self.serial = self.chrom[b], self.start[b], self.stop[b], self.serial[b]
		if self.chrom_num>0:
			chrom_num = self.chrom_num
		else:
			chrom_num = len(np.unique(self.chrom))
		# NOTE(review): chrom_vec holds bare numbers ('1','2',...) while
		# self.chrom uses 'chrN' names — confirm signal_normalize_chrom
		# expects this format.
		chrom_vec = [str(i) for i in range(1,chrom_num+1)]
		print('chrom_vec', chrom_vec)
		self.bin_size = self.stop[1]-self.start[1]
		scale = self.scale
		if len(colnames)>=5:
			col_signal = colnames[4]
			self.signal = np.asarray(file2[col_signal])
			self.signal = self.signal[b]
			self.signal_pre = self.signal.copy()
			if signal_normalize==1:
				if self.run_id>10:
					# normalize over all chromosomes, then optionally overwrite
					# the training chromosomes with training-only normalization
					self.signal_pre1, id1, signal_vec1 = self.signal_normalize_chrom(self.chrom,self.signal,chrom_vec,scale)
					if not('train_signal_update' in self.config) or (self.config['train_signal_update']==1):
						train_signal, id2, signal_vec2 = self.signal_normalize_chrom(self.chrom,self.signal,self.train_chromvec,scale)
						id_1 = mapping_Idx(id1,id2)
						self.signal = self.signal_pre1.copy()
						self.signal[id_1] = train_signal
					else:
						self.signal = self.signal_pre1.copy()
				else:
					print('signal_normalize_bychrom')
					self.signal, id1, signal_vec = self.signal_normalize_bychrom(self.chrom,self.signal,chrom_vec,scale)
		else:
			# no signal column: use a constant placeholder signal
			self.signal = np.ones(len(b))
		print('load local serial', self.serial.shape, self.signal.shape, np.max(self.signal), np.min(self.signal))
		if 'tol_region_search' in self.config:
			tol = self.config['tol_region_search']
		else:
			tol = 2
		# only train or predict on some regions
		print('load_local_serial',len(self.chrom))
		if len(region_list_1)>0:
			# restrict to rows overlapping the (chrom, start, stop) regions
			num1 = len(region_list_1)
			list1 = []
			for i in range(num1):
				t_region = region_list_1[i]
				t_chrom, t_start, t_stop = 'chr%d'%(t_region[0]), t_region[1], t_region[2]
				t_id1 = np.where((self.chrom==t_chrom)&(self.start<t_stop)&(self.stop>t_start))[0]
				list1.extend(t_id1)
			b1 = np.asarray(list1)
			self.chrom, self.start, self.stop, self.serial = self.chrom[b1], self.start[b1], self.stop[b1], self.serial[b1]
			print('load_local_serial',num1,len(self.chrom))
			print(region_list_1)
		if len(region_list)>0:
			# restrict by region search and record region boundaries
			id1, region_list = self.region_search_1(self.chrom,self.start,self.stop,self.serial,region_list,type_id2,tol)
			self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id1], self.start[id1], self.stop[id1], self.serial[id1], self.signal[id1]
			id2 = self.region_search_boundary(self.chrom,self.start,self.stop,self.serial,region_list)
			self.region_boundary = id2
			print('region_boundary',id2)
		else:
			print('load_local_serial',region_list)
		return self.serial, self.signal
# training, validation and test data index
	# training, validation and test data index
	def prep_training_test(self,train_sel_list_ori):
		"""Split samples into training, validation and test index sets.

		Uses generate_train_test_1 for the train/test split, then
		generate_index_1 (ratio from config['ratio1'], default 0.95) to carve
		validation out of training. Populates self.idx_list,
		self.idx_train_val and self.y_signal.
		Returns (train_sel_list, val_sel_list, test_sel_list), or None when
		there is no training signal.
		"""
		train_id1, test_id1, y_signal_train1, y_signal_test, train1_sel_list, test_sel_list = self.generate_train_test_1(train_sel_list_ori)
		self.idx_list = {'test':test_id1}
		self.y_signal = {'test':y_signal_test}
		if len(y_signal_test)>0:
			print('y_signal_test',np.max(y_signal_test),np.min(y_signal_test))
		if len(y_signal_train1)>0:
			print('y_signal_train',np.max(y_signal_train1),np.min(y_signal_train1))
			self.idx_list.update({'train':[],'valid':[]})
		else:
			# nothing to train on: only the test split is populated
			return
		print(train1_sel_list[0:5])
		# split training and validation data
		if 'ratio1' in self.config:
			ratio = self.config['ratio1']
		else:
			ratio = 0.95
		if 'type_id1' in self.config:
			type_id_1 = self.config['type_id1']
		else:
			type_id_1 = 0
		idx_train, idx_valid, idx_test = self.generate_index_1(train1_sel_list, test_sel_list, ratio, type_id_1)
		print('idx_train,idx_valid,idx_test', len(idx_train), len(idx_valid), len(idx_test))
		# optionally constrain train/valid indices to configured regions
		if (len(self.region_list_train)>0) or (len(self.region_list_valid)>0):
			idx_train, idx_valid = self.generate_train_test_2(train1_sel_list,idx_train,idx_valid)
			print('idx_train,idx_valid', len(idx_train), len(idx_valid))
		train_sel_list, val_sel_list = train1_sel_list[idx_train], train1_sel_list[idx_valid]
		self.idx_list.update({'train':train_id1[idx_train],'valid':train_id1[idx_valid]})
		self.idx_train_val = {'train':idx_train,'valid':idx_valid}
		self.y_signal.update({'train':y_signal_train1[idx_train],'valid':y_signal_train1[idx_valid]})
		return train_sel_list, val_sel_list, test_sel_list
# prepare data from predefined features: kmer frequency feature and motif feature
def prep_data_sub2(self,path1,file_prefix,type_id2,feature_dim1,feature_dim2,flag_1):
    """Load predefined k-mer frequency (and optionally motif) features.

    Loads the transformed feature matrix saved under `path1` and, when
    7-mers are requested (7 in self.kmer_size), merges it with the kmer7
    feature file on shared genome-position serials.

    Args:
        path1: directory containing the saved .npy feature file
        file_prefix: filename prefix of the saved feature file
        type_id2: type id embedded in the saved filename
        feature_dim1: number of k-mer feature dimensions to keep
        feature_dim2: number of motif feature dimensions to keep
        flag_1: if True (or self.feature_dim_motif==0), drop the trailing
            motif columns from the loaded matrix
    Returns:
        (x_train1_trans, train_sel_list_ori): feature matrix and matching
        (chrom, serial) index list; both empty lists when the feature file
        is missing.
    """
    species_id = self.species_id
    celltype_id = self.cell_type1
    # saved feature files differ by species in kmer/motif dimensionality
    if species_id=='mm10':
        kmer_dim_ori, motif_dim_ori = 100, 50
        filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,kmer_dim_ori,motif_dim_ori)
        filename2 = '%s_%d_kmer7_0_200_trans.h5'%(species_id,celltype_id)
    else:
        kmer_dim_ori, motif_dim_ori = 50, 50
        filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,kmer_dim_ori,motif_dim_ori)
        filename2 = '%s_kmer7_0_200_trans.h5'%(species_id)
    kmer_size1, kmer_size2, kmer_size3 = 5,6,7
    x_train1_trans, train_sel_list_ori = [], []
    flag1, flag2 = 0, 0
    flag3 = True
    if flag3==True:
        if os.path.exists(filename1)==True:
            print("loading data...")
            data1 = np.load(filename1,allow_pickle=True)
            data_1 = data1[()]
            x_train1_trans_ori, train_sel_list_ori = np.asarray(data_1['x1']), np.asarray(data_1['idx'])
            print('train_sel_list',train_sel_list_ori.shape)
            print('x_train1_trans',x_train1_trans_ori.shape)
            if kmer_size2 in self.kmer_size:
                flag1 = 1
            serial1 = train_sel_list_ori[:,1]
            dim1 = x_train1_trans_ori.shape[1]
            if (self.feature_dim_motif==0) or (flag_1==True):
                # drop the trailing motif columns
                x_train1_trans = x_train1_trans_ori[:,0:-motif_dim_ori]
            else:
                # split the matrix into the k-mer part and the motif part
                x_train1_trans_1 = x_train1_trans_ori[:,0:dim1-motif_dim_ori]
                x_train1_trans_2 = x_train1_trans_ori[:,dim1-motif_dim_ori:]
        else:
            print('data not found!')
            print(filename1)
            # fix: was `trans_sel_list_ori` (undefined name -> NameError)
            return x_train1_trans, train_sel_list_ori
    if kmer_size3 in self.kmer_size:
        with h5py.File(filename2,'r') as fid:
            serial2 = fid["serial"][:]
            feature_mtx = fid["vec"][:]
            print(serial2)
            print(len(serial2),feature_mtx.shape)
            flag2 = 1
    if flag1==1:
        if flag2==1:
            # merge the loaded features with the kmer7 features on the
            # serials present in both files
            t_serial = np.intersect1d(serial1,serial2)
            id1 = mapping_Idx(serial1,t_serial)
            id2 = mapping_Idx(serial2,t_serial)
            if 'feature_dim_transform_1' in self.config:
                sel_idx = self.config['feature_dim_transform_1']
                # fix: was `list(0,21)` (TypeError: list() takes one arg);
                # keep the first 21 columns plus the next sel_idx[0] columns
                sel_id1 = list(range(0,21))+list(range(21,21+sel_idx[0]))
                sel_id2 = range(sel_idx[1])
            else:
                sel_id1 = list(range(0,21))+list(range(21,31))
                # fix: was `feature_dim1-sel_idx1` with `sel_idx1` undefined;
                # keep the remaining columns so the total is feature_dim1
                sel_id2 = range(feature_dim1-len(sel_id1))
            # fix: `x[id1,sel_id1]` paired two integer index arrays of
            # different lengths (numpy broadcast error); select the rows
            # first, then the columns
            if (self.feature_dim_motif==0) or (flag_1==True):
                x_train1_trans = np.hstack((x_train1_trans[id1][:,sel_id1],feature_mtx[id2][:,sel_id2]))
            else:
                x_train1_trans = np.hstack((x_train1_trans_1[id1][:,sel_id1],feature_mtx[id2][:,sel_id2],x_train1_trans_2[id1,0:feature_dim2]))
            train_sel_list_ori = train_sel_list_ori[id1]
        else:
            pass
    elif flag2==1:
        # only kmer7 features requested: keep the first two columns of the
        # loaded matrix and append feature_dim1 kmer7 columns
        t_serial = np.intersect1d(serial1,serial2)
        id1 = mapping_Idx(serial1,t_serial)
        id2 = mapping_Idx(serial2,t_serial)
        x_train1_trans = np.hstack((x_train1_trans_ori[id1,0:2],feature_mtx[id2,0:feature_dim1]))
        train_sel_list_ori = train_sel_list_ori[id1]
        self.feature_dim_select1 = -1
        if (self.feature_dim_motif==1) and (flag_1==False):
            x_train1_trans = np.hstack((x_train1_trans,x_train1_trans_2[id1,0:feature_dim2]))
    else:
        print('data not found!')
    return x_train1_trans, train_sel_list_ori
# prepare data from predefined features
def prep_data_sub1(self,path1,file_prefix,type_id2,feature_dim_transform,load_type=0):
    """Load predefined features and align them with the loaded serials.

    Loads k-mer/motif features via prep_data_sub2, optionally appends
    separately transformed motif features (hg38 with 'motif_trans_typeid'
    configured, or mm10), applies feature-dimension selection, optional
    extra features, centromere filtering, positional encoding,
    shuffle/noise ablations and sub-sampling, then aligns feature rows
    with self.serial.

    Args:
        path1, file_prefix, type_id2: forwarded to prep_data_sub2
        feature_dim_transform: (kmer_dim, motif_dim) to keep
        load_type: if 1, return the features before any cell-type specific
            processing
    Returns:
        (x_train1_trans, train_sel_list_ori); -1 when data is missing.
    Side effects:
        sets self.feature_dim, self.x_train1_trans, self.train_sel_list,
        and filters self via local_serial_1.
    """
    self.feature_dim_transform = feature_dim_transform
    # map_idx = mapping_Idx(serial_ori,serial)
    sub_sample_ratio = 1
    shuffle = 0
    normalize, flanking, attention, run_id = self.normalize, self.flanking, self.attention, self.run_id
    config = self.config
    vec2 = dict()
    tol = self.tol
    L = flanking
    # np.save(filename1)
    print("feature transform")
    # filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,feature_dim_transform[0],feature_dim_transform[1])
    print(self.species_id)
    t_featuredim1, t_featuredim2 = feature_dim_transform[0], feature_dim_transform[1]
    # flag1: use separately transformed motif features
    flag1 = False
    if self.species_id=='hg38':
        if 'motif_trans_typeid' in self.config:
            flag1 = True
    if (self.species_id=='mm10'):
        flag1 = True
    if (t_featuredim1>0) or (flag1==False):
        x_train1_trans, train_sel_list_ori = self.prep_data_sub2(path1,file_prefix,type_id2,t_featuredim1,t_featuredim2,flag1)
        if len(x_train1_trans)==0:
            print('data not found!')
            return -1
        if t_featuredim2>0:
            print('train_sel_list',train_sel_list_ori.shape)
            print('x_train1_trans',x_train1_trans.shape)
            if (self.feature_dim_motif>=1) and (flag1==True):
                if self.species_id=='mm10':
                    annot1 = '%s_%d_motif'%(self.species_id,self.cell_type1)
                else:
                    annot1 = '%s_motif'%(self.species_id)
                motif_trans_typeid = self.config['motif_trans_typeid']
                motif_featuredim = self.config['motif_featuredim']
                motif_filename = '%s_%d_%d_trans.h5'%(annot1,motif_trans_typeid,motif_featuredim)
                # clamp the requested motif dimension to what is available
                if motif_featuredim<t_featuredim2:
                    print('error! %d %d',motif_featuredim,t_featuredim2)
                    t_featuredim2 = motif_featuredim
                with h5py.File(motif_filename,'r') as fid:
                    serial_1 = fid["serial"][:]
                    motif_data = fid["vec"][:]
                print(len(serial_1),motif_data.shape)
                # align k-mer and motif features on shared serials
                serial1 = train_sel_list_ori[:,1]
                serial2 = serial_1
                t_serial = np.intersect1d(serial1,serial2)
                id1 = mapping_Idx(serial1,t_serial)
                id2 = mapping_Idx(serial2,t_serial)
                x_train1_trans = np.hstack((x_train1_trans[id1],motif_data[id2,0:t_featuredim2]))
                train_sel_list_ori = train_sel_list_ori[id1]
                # train_sel_list_ori2 = serial_1[id2]
    else:
        print("data not found!")
        return
    x_train1_trans = self.feature_dim_select(x_train1_trans,feature_dim_transform)
    # feature loaded not specific to cell type
    if load_type==1:
        return x_train1_trans, train_sel_list_ori
    list1 = ['motif_feature','feature2']
    for t_feature in list1:
        if (t_feature in self.config) and (self.config[t_feature]==1):
            if t_feature=='feature2':
                pre_config = self.config['pre_config']
                if self.chrom_num>0:
                    chrom_num = self.chrom_num
                else:
                    chrom_num = len(np.unique(self.chrom))
                chrom_vec = list(range(1,chrom_num+1))
                feature_mtx2, serial_2 = self.prep_data_sequence_3(pre_config,chrom_vec)
            else:
                x = 1
            x_train1_trans_ori1 = x_train1_trans.copy()
            train_sel_list_ori1 = train_sel_list_ori.copy()
            serial1 = train_sel_list_ori[:,1]
            serial2 = serial_2[:,1]
            t_serial = np.intersect1d(serial1,serial2)
            # NOTE(review): the [0] keeps only the first matched index,
            # unlike the full-array mapping used elsewhere — confirm intended
            id1 = mapping_Idx(serial1,t_serial)[0]
            id2 = mapping_Idx(serial2,t_serial)[0]
            x_train1_trans = np.hstack((x_train1_trans[id1],feature_mtx2[id2]))
            train_sel_list_ori = train_sel_list_ori[id1]
            train_sel_list_ori2 = serial_2[id2]
            b1 = np.where(train_sel_list_ori[:,0]!=train_sel_list_ori2[:,0])[0]
            if len(b1)>0:
                print('error! train_sel_list_ori',len(b1))
    if ('centromere' in self.config) and (self.config['centromere']==1):
        # exclude samples overlapping centromere regions
        regionlist_filename = 'hg38.centromere.bed'
        serial1 = train_sel_list_ori[:,1]
        serial_list1, centromere_serial = self.select_region(serial1, regionlist_filename)
        id1 = mapping_Idx(serial1,serial_list1)
        id1 = id1[id1>=0]
        x_train1_trans = x_train1_trans[id1]
        train_sel_list_ori = train_sel_list_ori[id1]
        print(x_train1_trans.shape,train_sel_list_ori.shape)
    print('positional encoding', self.pos_code)
    print('feature dim',x_train1_trans.shape)
    self.feature_dim = x_train1_trans.shape[1]
    start = time.time()
    if self.pos_code ==1:
        x_train1_trans = self.positional_encoding1(x_train1_trans,train_sel_list_ori,self.feature_dim)
        print(x_train1_trans.shape)
    stop = time.time()
    print('positional encoding', stop-start)
    ## shuffle array
    if ('shuffle' in self.config) and (self.config['shuffle']==1):
        x_train1_trans, shuffle_id1 = shuffle_array(x_train1_trans)
        print('array shuffled')
        # np.random.shuffle(x_tran1_trans)
        # train_sel_list = train_sel_list[shuffle_id1]
    elif ('noise' in self.config) and (self.config['noise']>0):
        # ablation: replace features with zeros / uniform / gaussian noise
        if self.config['noise']==1:
            x_train1_trans = np.zeros_like(x_train1_trans)
            print('x_train1_trans, noise 1', x_train1_trans[0:5])
        elif self.config['noise']==2:
            x_train1_trans = np.random.uniform(0,1,x_train1_trans.shape)
        else:
            x_train1_trans = np.random.normal(0,1,x_train1_trans.shape)
    else:
        pass
    # optional sub-sampling (ratio defaults to 1, i.e. keep everything)
    if 'sub_sample_ratio' in self.config:
        sub_sample_ratio = self.config['sub_sample_ratio']
    num_sample = len(train_sel_list_ori)
    sub_sample = int(num_sample*sub_sample_ratio)
    train_sel_list_ori = train_sel_list_ori[0:sub_sample]
    x_train1_trans = x_train1_trans[0:sub_sample]
    # align train_sel_list_ori and serial
    print(train_sel_list_ori.shape,len(self.serial))
    id1 = mapping_Idx(train_sel_list_ori[:,1],self.serial)
    id2 = (id1>=0)
    print('mapping',len(self.serial),np.sum(id2),len(self.serial),len(id2))
    # self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id2], self.start[id2], self.stop[id2], self.serial[id2], self.signal[id2]
    self.local_serial_1(id2)
    id1 = id1[id2]
    train_sel_list_ori = train_sel_list_ori[id1]
    x_train1_trans = x_train1_trans[id1]
    self.x_train1_trans = x_train1_trans
    self.train_sel_list = train_sel_list_ori
    return x_train1_trans, train_sel_list_ori
def output_generate_sequences(self,idx_sel_list,seq_list):
    """Write the generated subsequences to a tab-delimited text file.

    Each row records the chromosome, genomic start/stop, the first and
    last serial of a subsequence and its length in serial units. The
    output filename is derived from the first (chrom, serial) entry of
    idx_sel_list.
    """
    seq_arr = np.asarray(seq_list)
    serial_col = idx_sel_list[:,1]
    # serial of the first and last element of every subsequence
    bounds = serial_col[seq_arr]
    first_idx = mapping_Idx(self.serial,bounds[:,0])
    last_idx = mapping_Idx(self.serial,bounds[:,1])
    fields = ['chrom','start','stop','serial1','serial2']
    data1 = pd.DataFrame(columns=fields)
    # region spans from the start of the first bin to the stop of the last
    data1['chrom'] = self.chrom[first_idx]
    data1['start'] = self.start[first_idx]
    data1['stop'] = self.stop[last_idx]
    data1['serial1'] = bounds[:,0]
    data1['serial2'] = bounds[:,1]
    data1['region_len'] = bounds[:,1]-bounds[:,0]+1
    output_filename = 'test_seqList_%d_%d.txt'%(idx_sel_list[0][0],idx_sel_list[0][1])
    data1.to_csv(output_filename,index=False,sep='\t')
    return True
# prepare data from predefined features
# prepare data from predefined features
def prep_data(self,path1,file_prefix,type_id2,feature_dim_transform):
    """Prepare training and validation tensors from predefined features.

    Loads aligned features via prep_data_sub1, splits them via
    prep_training_test, generates subsequences per split, then fills
    self.x / self.y (and, for context models, self.vec / self.vec_local)
    for the 'train' and 'valid' keys.

    Returns True on success.
    """
    x_train1_trans, train_sel_list_ori = self.prep_data_sub1(path1,file_prefix,type_id2,feature_dim_transform)
    train_sel_list, val_sel_list, test_sel_list = self.prep_training_test(train_sel_list_ori)
    # keys = ['train','valid','test']
    keys = ['train','valid']
    # self.idx_sel_list = {'train':train1_sel_list,'valid':val_sel_list,'test':test_sel_list}
    idx_sel_list = {'train':train_sel_list,'valid':val_sel_list,'test':test_sel_list}
    # self.idx_sel_list = idx_sel_list
    # seq_list_train, seq_list_valid: both locally calculated
    self.seq_list = dict()
    start = time.time()
    for i in keys:
        self.seq_list[i] = generate_sequences(idx_sel_list[i],region_list=self.region_boundary)
        print(len(self.seq_list[i]))
        self.output_generate_sequences(idx_sel_list[i],self.seq_list[i])
    stop = time.time()
    print('generate_sequences', stop-start)
    # generate initial state index
    self.init_id = dict()
    self.init_index(keys)
    # training and validation data
    # x_train1_trans = self.x_train1_trans
    for i in keys:
        idx = self.idx_list[i]
        if self.method<5 or self.method in [56]:
            # baseline methods without sequence context
            self.x[i] = x_train1_trans[idx]
            self.y[i] = self.y_signal[i]
            print(self.x[i].shape, self.y[i].shape)
        else:
            # NOTE(review): this rebinding shadows the idx_sel_list dict
            # defined above — harmless here since the dict is not used
            # again, but confirm before reordering
            idx_sel_list = self.train_sel_list[idx]
            start = time.time()
            x, y, self.vec[i], self.vec_local[i] = sample_select2a1(x_train1_trans[idx],self.y_signal[i],
                idx_sel_list, self.seq_list[i], self.tol, self.flanking)
            stop = time.time()
            print('sample_select2a1',stop-start)
            # concate context for baseline methods
            if self.method<=10:
                # x_train, x_valid, y_train, y_valid = train_test_split(x_train1, y_train1, test_size=0.2, random_state=42)
                x = x.reshape(x.shape[0],x.shape[1]*x.shape[-1])
                y = y[:,self.flanking]
            self.x[i], self.y[i] = x, y
            print(self.x[i].shape, self.y[i].shape)
    return True
# prepare data from predefined features
# prepare data from predefined features
def prep_data_1(self,path1,file_prefix,type_id2,feature_dim_transform,
            n_fold=5, ratio=0.9, type_id=1):
    """Prepare features and build n-fold cross-validation index vectors.

    Loads aligned features via prep_data_sub1, restricts self.chrom/
    start/stop/serial/signal and self.x_train1_trans to the mapped
    serials, then delegates fold generation to generate_index_2.

    Returns:
        id_vec as produced by generate_index_2.
    """
    x_train1_trans, train_sel_list_ori = self.prep_data_sub1(path1,file_prefix,type_id2,feature_dim_transform)
    print(train_sel_list_ori)
    id1 = mapping_Idx(train_sel_list_ori[:,1],self.serial)
    id2 = (id1>=0)
    print('mapping',len(self.serial),np.sum(id2))
    # keep only positions whose serial was found in the feature list
    self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id2], self.start[id2], self.stop[id2], self.serial[id2], self.signal[id2]
    id1 = id1[id2]
    train_sel_list_ori = train_sel_list_ori[id1]
    # self.x_train1_trans was set by prep_data_sub1 above
    self.x_train1_trans = self.x_train1_trans[id1]
    print(train_sel_list_ori.shape,self.x_train1_trans.shape)
    id_vec = self.generate_index_2(train_sel_list_ori, n_fold=n_fold, ratio=ratio, type_id=type_id)
    return id_vec
def find_serial_ori_1_local(self,chrom_vec,type_id2=1):
    """Load serials (and one-hot sequences) per chromosome for mouse data.

    Hard-codes species mm10 and reads per-chromosome encoded HDF5 files
    named '<species>_<cell_type>_<chrom>_encoded1.h5'; if the chr1 file is
    missing, prep_data_sequence_ori is invoked first to create them.

    Args:
        chrom_vec: iterable of numeric chromosome ids to load
        type_id2: if 1, also load the one-hot sequence matrices ("vec")
    Returns:
        (serial_vec, f_mtx): serial_vec has the chromosome id prepended as
        the first column; f_mtx stacks the loaded sequences (empty when
        type_id2 != 1).
    """
    # filename1 = 'mm10_%d_%s_encoded1.h5'%(self.config['cell_type1'],chrom_id1)
    self.species_id = 'mm10'
    self.cell_type1 = self.config['cell_type1']
    # NOTE(review): hard-coded cluster path; only used by the commented
    # fallback below
    file_path1 = '/work/magroup/yy3/data1/replication_timing3/mouse'
    # filename1 = '%s/mm10_5k_seq_genome%d_1.txt'%(file_path1,self.config['cell_type1'])
    chrom_id1 = 'chr1'
    filename1 = '%s_%d_%s_encoded1.h5'%(self.species_id,self.cell_type1,chrom_id1)
    list1, list2 = [], []
    serial_vec = []
    print(filename1)
    if os.path.exists(filename1)==False:
        # prepare data from predefined features
        # one hot encoded feature vectors for each chromosome
        self.prep_data_sequence_ori()
        print('prep_data_sequence_ori',filename1)
    for chrom_id in chrom_vec:
        # if chrom_id<22:
        # 	continue
        chrom_id1 = 'chr%s'%(chrom_id)
        # if self.config['species_id']==0:
        # 	filename2 = 'mm10_%d_%s_encoded1.h5'%(self.config['cell_type1'],chrom_id1)
        # else:
        # 	filename2 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
        filename2 = '%s_%d_%s_encoded1.h5'%(self.species_id,self.cell_type1,chrom_id1)
        with h5py.File(filename2,'r') as fid:
            serial1 = fid["serial"][:]
            if type_id2==1:
                seq1 = fid["vec"][:]
                list2.extend(seq1)
        list1.extend([chrom_id]*len(serial1))
        serial_vec.extend(serial1)
        print(chrom_id,len(serial1))
    list1, serial_vec = np.asarray(list1), np.asarray(serial_vec)
    # prepend the chromosome id as the first column
    serial_vec = np.hstack((list1[:,np.newaxis],serial_vec))
    f_mtx = np.asarray(list2)
    # data_1 = pd.read_csv(filename1,sep='\t')
    # colnames = list(data_1)
    # local_serial = np.asarray(data_1['serial'])
    # local_seq = np.asarray(data_1['seq'])
    # print('local_seq', local_seq.shape)
    # serial_vec = local_serial
    # f_mtx = local_seq
    # filename2 = '%s/mm10_5k_serial.bed'%(file_path1)
    # file2 = pd.read_csv(filename2,header=None,sep='\t')
    # ref_chrom, ref_start, ref_stop, ref_serial = np.asarray(file2[0]), np.asarray(file2[1]), np.asarray(file2[2]), np.asarray(file2[3])
    # # assert list(local_serial==list(ref_serial))
    # id_vec1 = []
    # for chrom_id in chrom_vec:
    # 	id1 = np.where(ref_chrom=='chr%d'%(chrom_id))[0]
    # 	id_vec1.extend(id1)
    # 	print(chrom_id,len(id1))
    # id_vec1 = np.asarray(id_vec1)
    # ref_chrom_1, ref_serial_1 = ref_chrom[id_vec1], ref_serial[id_vec1]
    # print('ref chrom local', len(ref_chrom_1), len(ref_serial_1))
    # id1 = utility_1.mapping_Idx(ref_serial_1,local_serial)
    # id2 = np.where(id1>=0)[0]
    # id1 = id1[id2]
    # chrom1 = ref_chrom_1[id1]
    # local_chrom = [int(chrom1[3:]) for chrom1 in ref_chrom_1]
    # local_chrom = np.asarray(local_chrom)
    # local_serial, local_seq = local_serial[id2], local_seq[id2]
    # serial_vec = np.column_stack((local_chrom,local_serial))
    # f_mtx = np.asarray(local_seq)
    return serial_vec, f_mtx
# find serial and feature vectors
# input: type_id1: load sequence feature or kmer frequency feature, motif feature
# type_id2: load serial or feature vectors
def find_serial_ori_1(self,file_path,file_prefix,chrom_vec,type_id1=0,type_id2=0,select_config={}):
    """Load serials and feature vectors for the requested chromosomes.

    Args:
        file_path, file_prefix: used when loading kmer/motif features
        chrom_vec: numeric chromosome ids to load
        type_id1: 0 = sequence serials (and one-hot sequences when
            type_id2==1); 2 = load a layer from the HDF5 file named in
            select_config; otherwise kmer/motif features via prep_data_sub1
        type_id2: with type_id1==0, also load the sequence matrices
        select_config: provides 'input_filename1' and 'layer_name' for
            type_id1==2
    Returns:
        (serial_vec, f_mtx); serial_vec carries the chromosome id in
        column 0 and serial in column 1. f_mtx may be empty when only
        serials were requested.
    """
    # load the sequences
    if type_id1==0:
        # list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8)
        # cached serial table written on first use
        filename1 = '%s_serial_2.txt'%(self.species_id)
        list1, list2 = [], []
        serial_vec = []
        if (os.path.exists(filename1)==False) or (type_id2==1):
            if self.config['species_id']==0:
                serial_vec, list2 = self.find_serial_ori_1_local(chrom_vec)
            else:
                for chrom_id in chrom_vec:
                    chrom_id1 = 'chr%s'%(chrom_id)
                    filename2 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
                    with h5py.File(filename2,'r') as fid:
                        serial1 = fid["serial"][:]
                        if type_id2==1:
                            seq1 = fid["vec"][:]
                            list2.extend(seq1)
                    list1.extend([chrom_id]*len(serial1))
                    serial_vec.extend(serial1)
                    print(chrom_id,len(serial1))
                list1, serial_vec = np.asarray(list1), np.asarray(serial_vec)
                serial_vec = np.hstack((list1[:,np.newaxis],serial_vec))
                # cache the serial table for subsequent runs
                np.savetxt(filename1,serial_vec,fmt='%d',delimiter='\t')
        else:
            serial_vec = np.loadtxt(filename1,dtype=np.int64)
            if serial_vec.shape[-1]>2:
                # third column counts positions containing N bases
                cnt1 = serial_vec[:,-1]
                b1 = np.where(cnt1>0)[0]
                ratio1 = len(b1)/len(serial_vec)
                print('sequence with N', len(b1),len(serial_vec),ratio1)
        # serial_vec = serial_vec[:,0]
        f_mtx = np.asarray(list2)
    elif type_id1==2:
        # load a pre-computed feature layer from an HDF5 file
        filename1 = select_config['input_filename1']
        layer_name = select_config['layer_name']
        with h5py.File(filename1,'r') as fid:
            f_mtx = np.asarray(fid[layer_name][:],dtype=np.float32)
            print(f_mtx.shape)
            serial_vec = fid["serial"][:]
            assert len(serial_vec )==f_mtx.shape[0]
            print(serial_vec[0:5])
    else:
        # load kmer frequency features and motif features
        load_type_id2 = 0
        x_train1_trans, train_sel_list_ori = self.prep_data_sub1(file_path,file_prefix,load_type_id2,self.feature_dim_transform,load_type=1)
        # serial_vec = train_sel_list_ori[:,1]
        serial_vec = np.asarray(train_sel_list_ori)
        f_mtx = np.asarray(x_train1_trans)
    return serial_vec, f_mtx
def find_serial_ori(self,file_path,file_prefix,type_id1=0,type_id2=0,select_config={}):
    """Load serials/features for the numeric chromosomes and filter self.

    Collects the numeric chromosome ids present in self.chrom, loads the
    corresponding serials and feature matrix via find_serial_ori_1 (stored
    on self.serial_vec / self.f_mtx), then restricts the object's loaded
    annotations to the serials that were found (local_serial_1).
    """
    # numeric chromosome ids, e.g. 'chr3' -> 3; non-numeric ones ('chrX')
    # raise in int() and are skipped
    numeric_chroms = []
    for name in np.unique(self.chrom):
        try:
            if name.find('chr')>=0:
                numeric_chroms.append(int(name[3:]))
        except:
            continue
    numeric_chroms = np.sort(numeric_chroms)
    serial_vec, f_mtx = self.find_serial_ori_1(file_path,file_prefix,numeric_chroms,
                                                type_id1=type_id1,type_id2=type_id2,
                                                select_config=select_config)
    self.serial_vec = serial_vec
    self.f_mtx = f_mtx
    print(len(self.chrom),len(self.serial))
    # keep only positions whose serial is present in the loaded table
    match_idx = mapping_Idx(serial_vec[:,1],self.serial)
    keep = np.where(match_idx>=0)[0]
    self.local_serial_1(keep,type_id=0)
    print(len(self.chrom),len(self.serial))
    return True
def prep_data_2(self,file_path,file_prefix,seq_len_thresh=50):
    """Export per-sample sequence context windows to segmented HDF5 files.

    Builds (chrom, serial) pairs for the numeric chromosomes, generates
    subsequences (dropping those not longer than seq_len_thresh), selects
    context windows with sample_select2a1, then per chromosome loads the
    one-hot encoded sequences and accumulates them in segments of
    `interval` samples. Also writes a label-index table
    '<cell>_label_ID_1.txt' and the label signals '<cell>_label.h5'.

    Args:
        file_path: output directory
        file_prefix: forwarded to find_serial_ori
        seq_len_thresh: minimum subsequence length to keep
    Returns:
        list_ID rows [label_id, label_serial, filename, local_id], or -1
        on a serial-mapping inconsistency.
    """
    self.find_serial_ori(file_path,file_prefix)
    # numeric chromosome ids present in self.chrom (e.g. 'chr3' -> 3)
    chrom_vec = np.unique(self.chrom)
    chrom_vec1 = []
    for chrom_id in chrom_vec:
        try:
            id1 = chrom_id.find('chr')
            if id1>=0:
                chrom_id1 = int(chrom_id[3:])
                chrom_vec1.append(chrom_id1)
        except:
            continue
    chrom_vec1 = np.sort(chrom_vec1)
    sample_num = len(self.chrom)
    # (chrom, serial) per sample; -1 marks non-numeric chromosomes
    idx_sel_list = -np.ones((sample_num,2),dtype=np.int64)
    for chrom_id in chrom_vec1:
        chrom_id1 = 'chr%d'%(chrom_id)
        b1 = np.where(self.chrom==chrom_id1)[0]
        idx_sel_list[b1,0] = [chrom_id]*len(b1)
        idx_sel_list[b1,1] = self.serial[b1]
    # keep samples on numeric chromosomes only
    id1 = idx_sel_list[:,0]>=0
    idx_sel_list = idx_sel_list[id1]
    # fix: was len(id1), which counts all samples rather than retained ones
    sample_num = int(np.sum(id1))
    y = self.signal[id1]
    # fix: was idx_sel_list[id1] — idx_sel_list was already filtered above,
    # so indexing it again with the full-length mask crashes whenever any
    # sample was dropped
    x_mtx = idx_sel_list
    seq_list = generate_sequences(idx_sel_list, gap_tol=5, region_list=[])
    seq_len = seq_list[:,1]-seq_list[:,0]+1
    thresh1 = seq_len_thresh
    b1 = np.where(seq_len>thresh1)[0]
    print(len(seq_list),len(b1))
    seq_list = seq_list[b1]
    seq_len1 = seq_list[:,1]-seq_list[:,0]+1
    print(sample_num,np.sum(seq_len1),seq_list.shape,np.max(seq_len),np.min(seq_len),np.median(seq_len),np.max(seq_len1),np.min(seq_len1),np.median(seq_len1))
    self.output_generate_sequences(idx_sel_list,seq_list)
    t_mtx, signal_mtx, vec1_serial, vec1_local = sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=self.tol, L=self.flanking)
    # serial of the center position of each context window
    t_serial = vec1_serial[:,self.flanking]
    context_size = vec1_serial.shape[1]
    id1 = mapping_Idx(idx_sel_list[:,1],t_serial)
    b1 = np.where(id1>=0)[0]
    if len(b1)!=len(vec1_serial):
        print('error!',len(b1),len(vec1_serial))
        return -1
    sel_id1 = id1[b1]
    t_chrom = idx_sel_list[sel_id1,0]
    print(t_chrom,t_serial)
    print(t_chrom.shape,t_serial.shape)
    print(vec1_serial.shape)
    list_ID = []
    cnt1 = 0
    interval = 200
    list1, list2 = [],[]
    list3 = []
    for chrom_id in chrom_vec1:
        chrom_id1 = 'chr%s'%(chrom_id)
        filename1 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
        t_id1 = np.where(t_chrom==chrom_id)[0]
        t_serial1 = t_serial[t_id1]  # serial by chromosome
        sample_num1 = len(t_serial1)
        # fix: np.int was removed from numpy (>=1.24); use the builtin
        num_segment = int(np.ceil(sample_num1/interval))
        print(chrom_id1,num_segment,interval,sample_num1)
        with h5py.File(filename1,'r') as fid:
            serial1 = fid["serial"][:]
            seq1 = fid["vec"][:]
            serial1 = serial1[:,0]
            print(serial1.shape, seq1.shape)
        id1 = utility_1.mapping_Idx(serial1,t_serial1)
        id2 = np.where(id1>=0)[0]
        num1 = len(id2)
        segment_id = 0
        t_signal_mtx = signal_mtx[t_id1[id2]]
        list3.extend(t_signal_mtx)
        for i in range(num1):
            cnt2 = i+1
            t_id2 = id2[i]
            label_serial = t_serial1[t_id2]
            t_vec1_serial = vec1_serial[t_id1[t_id2]]
            id_1 = mapping_Idx(serial1,t_vec1_serial)
            b1 = np.where(id_1>=0)[0]
            if len(b1)!=context_size:
                # part of the context window is missing from the encoded file
                b2 = np.where(id_1<0)[0]
                print('error!',chrom_id1,label_serial,t_vec1_serial[b2],len(b1),context_size)
                np.savetxt('temp1.txt',serial1,fmt='%d',delimiter='\t')
                np.savetxt('temp2.txt',t_vec1_serial,fmt='%d',delimiter='\t')
                return -1
            t_mtx = seq1[id_1[b1]]
            list1.append(t_vec1_serial)
            list2.append(t_mtx)
            local_id = cnt2%interval
            label_id = cnt1
            output_filename = 'test1_%s_%s_%d.h5'%(self.cell,chrom_id1,segment_id)
            # close out the current segment every `interval` samples
            if (cnt2%interval==0) or (cnt2==num1):
                output_filename1 = '%s/%s'%(file_path,output_filename)
                list1 = np.asarray(list1)
                list2 = np.asarray(list2,dtype=np.int8)
                print(chrom_id1,segment_id,local_id,label_id,label_serial,list1.shape,list2.shape)
                # NOTE(review): the actual HDF5 write of list1/list2 was
                # commented out in the original; segments are only logged
                list1, list2 = [], []
                segment_id += 1
            cnt1 = cnt1+1
            list_ID.append([label_id,label_serial,output_filename,local_id])
    fields = ['label_id','label_serial','filename','local_id']
    list_ID = np.asarray(list_ID)
    data1 = pd.DataFrame(columns=fields,data=list_ID)
    output_filename = '%s/%s_label_ID_1'%(file_path,self.cell)
    data1.to_csv(output_filename+'.txt',index=False,sep='\t')
    output_filename = '%s/%s_label.h5'%(file_path,self.cell)
    list3 = np.asarray(list3)
    print(list3.shape)
    with h5py.File(output_filename,'w') as fid:
        fid.create_dataset("vec", data=np.asarray(list3), compression="gzip")
    return list_ID
# find serial for training and validation data
# find serial for training and validation data
def prep_data_2_sub1(self,file_path,file_prefix,type_id1=0,type_id2=0,gap_tol=5,seq_len_thresh=5,select_config={}):
    """Build per-split (train/valid/test) serial lists and context windows.

    Optionally loads/aligns features via find_serial_ori (type_id1>=0),
    builds (chrom, serial) pairs for numeric chromosomes, splits them via
    prep_training_test, and for each split generates subsequences,
    re-selects regions by subsequence length and computes context windows
    with sample_select2a1.

    Args:
        file_path, file_prefix: forwarded to find_serial_ori
        type_id1, type_id2, select_config: forwarded to find_serial_ori;
            type_id1 < 0 skips the feature loading entirely
        gap_tol: gap tolerance for generate_sequences (overridden by
            self.config['gap_thresh'] when present)
        seq_len_thresh: minimum subsequence length (overridden by
            self.config['seq_len_thresh'] when present)
    Returns:
        self.local_serial_dict mapping each key to
        [idx_sel_list1, y1, y, t_vec_serial, t_vec_local]; None on a
        serial-mapping inconsistency.
    """
    if type_id1>=0:
        self.find_serial_ori(file_path,file_prefix,
                                type_id1=type_id1,type_id2=type_id2,
                                select_config=select_config)
    # numeric chromosome ids present in self.chrom (e.g. 'chr3' -> 3)
    chrom_vec = np.unique(self.chrom)
    chrom_vec1 = []
    for chrom_id in chrom_vec:
        try:
            id1 = chrom_id.find('chr')
            if id1>=0:
                chrom_id1 = int(chrom_id[3:])
                chrom_vec1.append(chrom_id1)
        except:
            continue
    chrom_vec1 = np.sort(chrom_vec1)
    sample_num = len(self.chrom)
    # (chrom, serial) per sample; -1 marks non-numeric chromosomes
    idx_sel_list = -np.ones((sample_num,2),dtype=np.int64)
    if 'gap_thresh' in self.config:
        gap_tol = self.config['gap_thresh']
    if 'seq_len_thresh' in self.config:
        seq_len_thresh = self.config['seq_len_thresh']
    for chrom_id in chrom_vec1:
        chrom_id1 = 'chr%d'%(chrom_id)
        b1 = np.where(self.chrom==chrom_id1)[0]
        idx_sel_list[b1,0] = [chrom_id]*len(b1)
        idx_sel_list[b1,1] = self.serial[b1]
    # keep samples on numeric chromosomes only
    id1 = idx_sel_list[:,0]>=0
    idx_sel_list = idx_sel_list[id1]
    # fix: was len(id1), which counts all samples rather than retained ones
    sample_num = int(np.sum(id1))
    y = self.signal[id1]
    # fix: was idx_sel_list[id1] — idx_sel_list was already filtered above,
    # so re-applying the full-length mask crashes when any sample dropped
    x_mtx = idx_sel_list
    self.train_sel_list_ori = idx_sel_list
    self.y_signal_1 = self.signal[id1]
    ref_serial = idx_sel_list[:,1]
    train_sel_list, val_sel_list, test_sel_list = self.prep_training_test(idx_sel_list)
    print(len(train_sel_list),len(val_sel_list),len(test_sel_list))
    keys = ['train','valid','test']
    self.idx_sel_list_ori = {'train':train_sel_list,'valid':val_sel_list,'test':test_sel_list}
    # seq_list_train, seq_list_valid: both locally calculated
    self.seq_list = dict()
    start = time.time()
    self.local_serial_dict = dict()
    for i in keys:
        idx_sel_list1 = self.idx_sel_list_ori[i]
        region_boundary = self.region_boundary
        print('region_boundary',region_boundary)
        seq_list = generate_sequences(idx_sel_list1, gap_tol=gap_tol, region_list=region_boundary)
        # reselect the regions according to the subsequence length and
        # recalculate seq_list
        idx_sel_list1, seq_list = self.select_region_local_1(idx_sel_list1,seq_list,
                                                                gap_tol=gap_tol,
                                                                seq_len_thresh=seq_len_thresh,
                                                                region_list=[])
        self.idx_sel_list_ori[i] = idx_sel_list1
        self.seq_list[i] = seq_list
        x1 = idx_sel_list1
        sel_id = utility_1.mapping_Idx(ref_serial,idx_sel_list1[:,1])
        y1 = self.y_signal_1[sel_id]
        x, y, t_vec_serial, t_vec_local = sample_select2a1(x1,y1,
            idx_sel_list1, seq_list, self.tol, self.flanking)
        # center serial of each context window
        t_serial1 = t_vec_serial[:,self.flanking]
        id1 = utility_1.mapping_Idx(idx_sel_list1[:,1],t_serial1)
        b1 = np.where(id1>=0)[0]
        if len(b1)!=len(t_serial1):
            print('error!',i)
            return
        idx_sel_list1 = idx_sel_list1[id1[b1]]
        self.local_serial_dict[i] = [idx_sel_list1,y1,y,t_vec_serial,t_vec_local]
        print(i,t_serial1.shape,y.shape)
    stop = time.time()
    print('generate_sequences', stop-start)
    return self.local_serial_dict
# load feature
# load feature
def load_feature_local(self,chrom_vec,type_id=0,select_config={}):
    """Load the feature matrix and serials for the given chromosomes.

    Args:
        chrom_vec: numeric chromosome ids to load
        type_id: 0 = one-hot sequences from the per-chromosome encoded
            HDF5 files; 1 = kmer frequency / motif features (cached on
            self.serial_vec / self.f_mtx when available); otherwise load a
            layer from the HDF5 file named in select_config
        select_config: provides 'input_filename1' and 'layer_name' for the
            last branch
    Returns:
        (serial_vec, f_mtx); serial_vec carries the chromosome id in
        column 0 and serial in column 1 for the first two branches.
    """
    # load sequences
    if type_id==0:
        serial_vec = []
        list1, list2 = [],[]
        # list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8)
        if self.config['species_id']==0:
            serial_vec, f_mtx = self.find_serial_ori_1_local(chrom_vec)
        else:
            for chrom_id in chrom_vec:
                chrom_id1 = 'chr%s'%(chrom_id)
                filename1 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
                with h5py.File(filename1,'r') as fid:
                    serial1 = fid["serial"][:]
                    seq1 = fid["vec"][:]
                serial_vec.extend(serial1)
                list1.extend([chrom_id]*len(serial1))
                list2.extend(seq1)
                print(len(serial1),seq1.shape)
            list1 = np.asarray(list1)
            # prepend the chromosome id as the first column
            serial_vec = np.hstack((list1[:,np.newaxis],serial_vec))
            f_mtx = np.asarray(list2)
    # kmer frequency and motif feature
    elif type_id==1:
        if len(self.serial_vec)>0 and (len(self.f_mtx)>0):
            # reuse the features cached by find_serial_ori
            serial_vec = self.serial_vec
            f_mtx = self.f_mtx
        else:
            type_id2 = 0
            x_train1_trans, train_sel_list_ori = self.prep_data_sub1(self.file_path,self.file_prefix,type_id2,self.feature_dim_transform,load_type=1)
            # serial_vec = train_sel_list_ori[:,1]
            serial_vec = np.asarray(train_sel_list_ori)
            f_mtx = np.asarray(x_train1_trans)
    else:
        # load a pre-computed feature layer from an HDF5 file
        filename1 = select_config['input_filename1']
        layer_name = select_config['layer_name']
        with h5py.File(filename1,'r') as fid:
            f_mtx = np.asarray(fid[layer_name][:],dtype=np.float32)
            print(f_mtx.shape)
            serial_vec = fid["serial"][:]
            assert len(serial_vec )==f_mtx.shape[0]
            print(serial_vec[0:5])
    return serial_vec, f_mtx
# find serial
# find serial
def find_serial_local(self,ref_serial,vec_serial_ori,sel_id):
    """Extend a subsampled window selection to cover every reference serial.

    Args:
        ref_serial: serials of all samples; must equal the center serial
            (column self.flanking) of vec_serial_ori row-for-row
        vec_serial_ori: per-sample context-window serial matrix
        sel_id: indices of the currently selected windows
    Returns:
        the (possibly extended, unique, sorted) selection indices such
        that each reference serial appears in at least one selected window.
    """
    # center serial of each context window must match ref_serial exactly
    serial_1 = vec_serial_ori[:,self.flanking]
    assert np.max(np.abs(ref_serial-serial_1))==0
    # all serials covered by the currently selected windows
    t_vec_serial = np.ravel(vec_serial_ori[sel_id])
    serial1 = np.unique(t_vec_serial)
    id1 = mapping_Idx(ref_serial,serial1)
    b1 = np.where(id1<0)[0]
    if len(b1)>0:
        # selected windows reference serials outside ref_serial
        print('error!',len(b1))
        print(serial1[b1])
    b_1 = np.where(id1>=0)[0]
    id1 = id1[b_1]
    sample_num = len(ref_serial)
    # reference serials not yet covered by any selected window
    id2 = np.setdiff1d(np.arange(sample_num),id1)
    # fix: id_2 was only assigned inside the branch below, so the final
    # print raised NameError whenever every serial was already covered
    id_2 = []
    if len(id2)>0:
        t_serial2 = ref_serial[id2]
        id_2 = mapping_Idx(serial_1,t_serial2)
        sel_id = list(sel_id)+list(id_2)
        sel_id = np.unique(sel_id)
    print('find serial local',len(sel_id),len(id_2))
    return sel_id
# load training and validation data
# load training and validation data
def prep_data_2_sub2(self,type_id1=0,keys=['train','valid'],stride=1,type_id=0,select_config={}):
    """Attach feature rows to the per-split entries in local_serial_dict.

    Loads the feature matrix for all chromosomes present in the requested
    splits (load_feature_local), optionally subsamples each split with
    `stride` (extending the selection to keep all serials covered when
    type_id==1), then maps each split's serials into the feature matrix
    and stores the rows in self.x[key] (row indices in self.idx[key]).

    Returns True.
    """
    chrom1 = []
    for i in range(0,len(keys)):
        key1 = keys[i]
        idx_sel_list, y_ori, y, vec_serial, vec_local = self.local_serial_dict[key1]
        chrom1.extend(idx_sel_list[:,0])
    chrom_vec1 = np.sort(np.unique(chrom1))
    serial_vec, f_mtx = self.load_feature_local(chrom_vec1,type_id=type_id1,select_config=select_config)
    print('load feature local', serial_vec.shape, f_mtx.shape)
    if serial_vec.shape[1]>2:
        # third column counts positions containing N bases
        cnt1 = serial_vec[:,-1]
        b1 = np.where(cnt1>0)[0]
        ratio1 = len(b1)/len(serial_vec)
        print(len(b1),len(serial_vec),ratio1)
    ref_serial = serial_vec[:,1]
    for i in range(0,len(keys)):
        key1 = keys[i]
        idx_sel_list, y_ori, y, vec_serial, vec_local = self.local_serial_dict[key1]
        num1 = len(idx_sel_list)
        if stride>1:
            # subsample windows with the given stride
            id1 = list(range(0,num1,stride))
            # the windows cover the positions
            print(num1,stride)
            if type_id==1:
                id1 = self.find_serial_local(idx_sel_list[:,1],vec_serial,id1)
            y, vec_serial, vec_local = y[id1], vec_serial[id1], vec_local[id1]
            self.local_serial_dict[key1] = [idx_sel_list, y_ori, y, vec_serial, vec_local]
        id2 = mapping_Idx(ref_serial,idx_sel_list[:,1])
        print(key1,len(ref_serial),len(idx_sel_list))
        print(ref_serial[0:5])
        print(idx_sel_list[0:5,1])
        b1 = np.where(id2<0)[0]
        if len(b1)>0:
            # some serials have no feature row; they are dropped below
            print('error!',len(b1),key1)
            # return
        print('mapping',len(id2))
        # update: keep only the serials that mapped into the feature matrix
        b_1 = np.where(id2>=0)[0]
        id2 = id2[b_1]
        idx_sel_list, y_ori = idx_sel_list[b_1], y_ori[b_1]
        y, vec_serial, vec_local = y[b_1], vec_serial[b_1], vec_local[b_1]
        self.local_serial_dict[key1] = [idx_sel_list, y_ori, y, vec_serial, vec_local]
        self.x[key1] = f_mtx[id2]
        self.idx[key1] = id2
    return True
# training and predition with sequences
def control_pre_test1_repeat(self,path1,file_prefix,run_id_load=-1):
self.prep_data_2_sub1(path1,file_prefix)
config = self.config.copy()
units1=[50,50,32,25,50,25,0,0]
flanking = 50
context_size = 2*flanking+1
n_step_local_ori = 5000
region_unit_size = 1
feature_dim = 4
local_conv_list1 = []
regularizer2, bnorm, activation = 1e-04, 1, 'relu'
if self.run_id==110001:
config_vec1 = [[64, 15, 5, 1, 2, 2, 0.2, 0],
[32, 5, 1, 1, 10, 10, 0.2, 0],
[32, 3, 1, 1, 5, 5, 0.2, 0]]
for t1 in config_vec1:
n_filters, kernel_size1, stride, dilation_rate1, pool_length1, stride1, drop_out_rate, boundary = t1
conv_1 = [n_filters, kernel_size1, stride, regularizer2, dilation_rate1, boundary, bnorm, activation, pool_length1, stride1, drop_out_rate]
local_conv_list1.append(conv_1)
config['local_conv_list1'] = local_conv_list1
print(local_conv_list1)
feature_dim1, feature_dim2, return_sequences_flag1, sample_local, pooling_local = 32, 25, True, 0, 0
n_step_local1 = 10
feature_dim3 = []
local_vec_1 = [feature_dim1, feature_dim2, feature_dim3, return_sequences_flag1, sample_local, pooling_local]
attention2_local = 0
select2 = 1
concatenate_1, concatenate_2 = 0, 1
hidden_unit = 32
regularizer2_2 = 1e-04
config.update({'attention1':0,'attention2':1,'select2':select2,'context_size':context_size,'n_step_local':n_step_local1,'n_step_local_ori':n_step_local_ori})
config.update({'local_vec_1':local_vec_1,'attention2_local':attention2_local})
config['feature_dim_vec'] = units1[2:]
config['feature_dim_vec_basic'] = units1[2:]
config.update({'local_conv_list1':local_conv_list1,'local_vec_1':local_vec_1})
config.update({'attention1':0,'attention2':1,'context_size':context_size,
'n_step_local_ori':n_step_local_ori})
config.update({'select2':select2,'attention2_local':attention2_local})
config.update({'concatenate_1':concatenate_1,'concatenate_2':concatenate_2})
config.update({'feature_dim':feature_dim,'output_dim':hidden_unit,'regularizer2_2':regularizer2_2})
model = utility_1.get_model2a1_attention_1_2_2_sample5(config)
# find feature vectors with the serial
self.x = dict()
self.idx = dict()
self.prep_data_2_sub2(type_id1=0,keys=['train','valid'],stride=1)
mtx_train = self.x['train']
idx_sel_list_train, y_train_ori_1, y_train_ori, vec_serial_train, vec_local_train = self.local_serial_dict['train']
mtx_valid = self.x['valid']
idx_sel_list_valid, y_valid_ori_1, y_valid_ori, vec_serial_valid, vec_local_valid = self.local_serial_dict['valid']
train_num1, valid_num1 = len(y_train_ori), len(y_valid_ori)
print('train',len(idx_sel_list_train),len(y_train_ori),mtx_train.shape)
print('valid',len(idx_sel_list_valid),len(y_valid_ori),mtx_valid.shape)
x_valid = mtx_valid[vec_local_valid]
y_valid = y_valid_ori
print(x_valid.shape,y_valid.shape)
type_id2 = 2
MODEL_PATH = 'test%d.h5'%(self.run_id)
n_epochs = 1
BATCH_SIZE = 32
n_step_local = n_step_local_ori
earlystop = EarlyStopping(monitor='val_loss', min_delta=self.min_delta, patience=self.step, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath=MODEL_PATH, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False)
num_sample1 = 1
interval = 2500
select_num = np.int(np.ceil(train_num1/interval))
# select_num1 = select_num*interval
# print(num_sample1,select_num,interval,select_num1)
if select_num>1:
t1 = np.arange(0,train_num1,interval)
pos = np.vstack((t1,t1+interval)).T
pos[-1][1] = train_num1
print(train_num1,select_num,interval)
print(pos)
else:
pos = [[0,train_num1]]
start2 = time.time()
train_id_1 = np.arange(train_num1)
valid_id_1 = np.arange(valid_num1)
np.random.shuffle(valid_id_1)
cnt1 = 0
mse1 = 1e5
decay_rate = 0.95
decay_step = 1
init_lr = self.config['lr']
for i1 in range(50):
self.config['lr'] = init_lr*((decay_rate)**(int(i1/decay_step)))
np.random.shuffle(train_id_1)
start1 = time.time()
valid_num2 = 2500
num2 = np.min([valid_num1,valid_num2])
valid_id2 = valid_id_1[0:num2]
x_valid1, y_valid1 = x_valid[valid_id2], y_valid[valid_id2]
for l in range(select_num):
s1, s2 = pos[l]
print(l,s1,s2)
sel_id = train_id_1[s1:s2]
x_train = mtx_train[vec_local_train[sel_id]]
y_train = y_train_ori[sel_id]
x_train, y_train = np.asarray(x_train), np.asarray(y_train)
print(x_train.shape,y_train.shape)
n_epochs = 1
train_num = x_train.shape[0]
print('x_train, y_train', x_train.shape, y_train.shape)
print('x_valid, y_valid', x_valid1.shape, y_valid1.shape)
# model.fit(x_train,y_train,epochs = n_epochs,batch_size = BATCH_SIZE,validation_data = [x_valid,y_valid],callbacks=[earlystop,checkpointer])
model.fit(x_train,y_train,epochs = n_epochs, batch_size = BATCH_SIZE, validation_data = [x_valid1,y_valid1],
callbacks=[earlystop,checkpointer])
# model.load_weights(MODEL_PATH)
model_path2 = '%s/model_%d_%d_%d_%d.h5'%(self.path,self.run_id,type_id2,context_size,i1)
model.save(model_path2)
# model_path2 = MODEL_PATH
if l%5==0:
print('loading weights... ', MODEL_PATH)
model.load_weights(MODEL_PATH) # load model with the minimum training error
y_predicted_valid1 = model.predict(x_valid)
y_predicted_valid = | np.ravel(y_predicted_valid1[:,flanking]) | numpy.ravel |
import argparse
import time
import cv2.cv2 as cv
import numpy as np
from scipy.signal import find_peaks
# Number of histogram bins per HSV channel; hue gets a finer resolution than
# saturation/value. num_of_bins is the length of one concatenated feature row.
h_bins = 8
s_bins = 4
v_bins = 4
num_of_bins = h_bins + s_bins + v_bins
def check_time(args, cap):
    """Resolve the requested time window into frame indices and seek to it.

    Args:
        args: Namespace with `start_sec` and `end_sec` in seconds; a negative
            `end_sec` means "until the end of the video".
        cap: An opened cv.VideoCapture.

    Returns:
        Tuple (start_frame, end_frame) of frame indices; the capture is
        positioned at start_frame as a side effect.

    Raises:
        ValueError: If the end time is earlier than the start time.
    """
    frame_count = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
    # Hoist the FPS query instead of calling cap.get() twice.
    fps = cap.get(cv.CAP_PROP_FPS)
    start_frame = int(args.start_sec * fps)
    if args.end_sec < 0:
        # Negative end time: process until the end of the video.
        end_frame = frame_count
    else:
        end_frame = int(args.end_sec * fps)
        if end_frame > frame_count:
            end_frame = frame_count
            print('[w] End time greater than the length of the video.')
    if end_frame < start_frame:
        # Carry the reason in the exception instead of printing and raising a
        # bare ValueError, so callers see the message too.
        raise ValueError('[f] End time must be larger than start time.')
    cap.set(cv.CAP_PROP_POS_FRAMES, start_frame)
    return start_frame, end_frame
def get_intersection(cap, numFrame, shape):
eof_flag = False
hists = np.zeros((numFrame, num_of_bins))
for k in range(numFrame):
_, img = cap.read()
if img is None:
eof_flag = True
break
img = cv.resize(img, (shape[1], shape[0]))
hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
h, _ = np.histogram(hsv_img[:, :, 0], h_bins, (0, 255))
s, _ = np.histogram(hsv_img[:, :, 1], s_bins, (0, 255))
v, _ = np.histogram(hsv_img[:, :, 2], v_bins, (0, 255))
hists[k] = np.concatenate((h, s, v))
if img is None:
hists = hists[0:k]
numFrame = k
# compute intersection
hists = hists / (3 * shape[0] * shape[1])
hists_shift = hists[[0] + [x for x in range(numFrame - 1)]]
hists_pair = np.stack((hists, hists_shift), 2)
hists_min = np.min(hists_pair, axis=2)
S = | np.sum(hists_min, axis=1) | numpy.sum |
''' Define OLS class '''
################################################################################
# 1: Setup
################################################################################
# Import necessary packages
import numpy as np
import pandas as pd
import scipy.linalg as scl
import scipy.stats as scs
################################################################################
# 2: Auxiliary functions
################################################################################
# Define a function which turns a list or vector-like object into a proper
# two dimensional column vector
def cvec(a):
    """ Coerce a list or vector-like object into a proper column vector
    Input
    a: List or vector-like object, has to be a potential input for np.array()
    Output
    vec: two dimensional NumPy array, with the first dimension weakly greater
        than the second (resulting in a column vector for a vector-like input)
    """
    # np.array(..., ndmin=2) lays vector-like input out as a single row
    vec = np.array(a, ndmin=2)
    rows, cols = vec.shape
    # A wide result means the input was a row; flip it into a column
    return vec.T if rows < cols else vec
################################################################################
# 3: Define OLS class
################################################################################
# Define OLS model
class ols():
""" Runs OLS regressions """
# Define initialization function
def __init__(self, name_gen_X='X', name_gen_y='y', add_intercept=True,
             name_gen_icept='(Intercept)', coef_only=False,
             no_joint=False, covariance_estimator=None, freq_weights=False,
             level=.05, df_adjust=None, verbose=True, fprec=np.float64,
             nround=4):
    """ Initialize ols() class
    Inputs
    name_gen_x: String, generic name prefix for X (RHS) variables, used if
                no variable names are provided in ols.fit()
    name_gen_y: String, generic name prefix for y (LHS) variable, used if no
                variable name is provided in ols.fit()
    add_intercept: Boolean, if True, an intercept will be added to the model
    name_gen_icept: String, name to use for the intercept variable
    coef_only: Boolean, if True, the model only calculates coefficients (and
               not the covariance matrix, which is computationally costly)
    no_joint: Boolean, if True, the model does not (by default) calculate a
              Wald test of joint significance of all coefficients
    covariance_estimator: String or None, covariance estimator to use; one
                          of 'homoskedastic', 'hc1', or 'cluster'. If None,
                          ols.ols_cov() picks 'hc1' without cluster IDs and
                          'cluster' with them.
    freq_weights: Boolean, if True, weights passed to ols.fit() are treated
                  as frequency weights
    level: Scalar in [0,1], level for confidence intervals
    df_adjust: Integer or None, degrees of freedom adjustment
    verbose: Boolean, if True, some notes and warnings are printed
    fprec: Float data type all floats are cast to (default np.float64)
    nround: Integer, decimals used when rounding in ols.summarize()
    """
    # General behaviour flags
    self.verbose = verbose
    self.fprec = fprec
    # Defaults consumed by ols.fit()
    self.name_gen_X = name_gen_X
    self.name_gen_y = name_gen_y
    self.add_icept = add_intercept
    self.name_gen_icept = name_gen_icept
    self.freq_weights = freq_weights
    self.coef_only = coef_only
    self.no_joint = no_joint
    self.df_adjust = df_adjust
    # Sample size and parameter count, set once data are fitted
    self.n = None
    self.k = None
    # Defaults consumed by ols.ols_ci() and ols.summarize()
    self.level = level
    self.nround = nround
    # Covariance estimator choice (None lets ols.ols_cov() decide)
    self.cov_est = covariance_estimator
    # Results populated by fit()/ols_cov()/ols_t()/ols_ci()/ols_p()/
    # summarize()/score()/wald(); everything starts out as None
    for attr in ('names_X', 'name_y', 'coef', 'est', 'V_hat', 'se', 't',
                 'ci', 'p', 'regtable', 'R2', 'R2adj', 'W', 'pW',
                 'waldtable'):
        setattr(self, attr, None)
# Define a function to fit the model
def fit(self, X, y, clusters=None, weights=None, names_X=None, name_y=None,
        name_gen_X=None, name_gen_y=None, add_intercept=None,
        coef_only=None, freq_weights=None, level=None, df_adjust=None,
        no_joint=None, copy=True, **kwargs_wald):
    """ Fit OLS model
    Inputs
    X: n by k matrix-like, RHS variables
    y: n by 1 vector-like, outcome variable
    clusters: n by 1 vector-like or None, cluster IDs
    weights: n by 1 vector-like or None, weights
    names_X: length k list or None, names for variables in X. If names_X is
             None and X is a pandas DataFrame or Series, column names will
             be used.
    name_y: String or None, name for outcome variable. As with X, this can
            be inferred if y is a pandas Series or DataFrame.
    name_gen_X, name_gen_y, add_intercept, coef_only, freq_weights, level,
    df_adjust, no_joint: See __init__(); if None, the value provided in
                         __init__() is used.
    copy: Boolean, if True, X is copied so the caller's data are never
          modified in place.
    kwargs_wald: Additional keyword arguments passed to self.wald().
    """
    # Check whether the generic name for X variables was changed
    if name_gen_X is not None:
        self.name_gen_X = name_gen_X
    # Check whether the generic name for the y variable was changed
    if name_gen_y is not None:
        self.name_gen_y = name_gen_y
    # Use the intercept setting provided here, or fall back on __init__()
    if add_intercept is not None:
        add_icept = add_intercept
    else:
        add_icept = self.add_icept
    # Fall back on the __init__() defaults for any unspecified options
    if coef_only is None:
        coef_only = self.coef_only
    if level is None:
        level = self.level
    if df_adjust is None:
        df_adjust = self.df_adjust
    if no_joint is None:
        no_joint = self.no_joint
    # Variable names
    #
    # Check whether names for X were provided
    if names_X is not None:
        self.names_X = names_X
    # Otherwise, infer names from pandas objects where possible
    elif isinstance(X, pd.DataFrame):
        self.names_X = X.columns
    elif isinstance(X, pd.Series):
        self.names_X = [X.name]
    else:
        # If all else fails, use generic names
        self.names_X = [
            self.name_gen_X+str(i+1) for i in np.arange(X.shape[1])
        ]
    # Same logic for the outcome variable
    if name_y is not None:
        self.name_y = name_y
    elif isinstance(y, pd.DataFrame):
        self.name_y = y.columns[0]
    elif isinstance(y, pd.Series):
        self.name_y = y.name
    else:
        self.name_y = self.name_gen_y
    # Copy X if necessary, so the caller's data are never modified in place
    if copy:
        X = X.copy()
    # Instantiate data matrices
    if np.ndim(X) == 1:
        # For one dimensional X, make sure this is a proper column vector
        X = cvec(X)
    else:
        # Otherwise, make sure it's a proper NumPy array
        X = np.array(X)
    # Check whether to add an intercept
    if add_icept:
        # If so, set up an intercept and prepend it to the data matrix
        cons = np.ones(shape=(X.shape[0], 1))
        X = np.concatenate([cons, X], axis=1)
        # Add the intercept to the variable names
        self.names_X = [self.name_gen_icept] + list(self.names_X)
    # For speed considerations, make sure these are self.fprec types
    X = X.astype(self.fprec)
    # Get number of observations n and variables k
    self.n, self.k = X.shape
    # Instantiate y data elements
    y = cvec(y).astype(self.fprec)
    # Clustered errors require cluster IDs
    if clusters is None and self.cov_est == 'cluster':
        raise ValueError(
            "Error in ols.fit(): The covariance estimator was set to"
            + " 'cluster', but clusters were not provided; please provide"
            + " cluster IDs, or change the covariance estimator"
        )
    # Check whether a cluster variable was provided
    if clusters is not None:
        clustvar = cvec(clusters)
    else:
        clustvar = None
    # Fall back on the __init__() default for frequency weights
    if freq_weights is None:
        freq_weights = self.freq_weights
    # Instantiate weights matrix (np.diag() only builds a diagonal matrix
    # from a one dimensional input, so make the weights one dimensional
    # before taking the diagonal)
    if weights is not None:
        if np.ndim(weights) == 1:
            # For one dimensional weights, use them as is
            W = np.array(weights)
        else:
            # Otherwise, make sure they're one dimensional
            # (bug fix: this used to read cvec(W), referencing W before
            # assignment and raising a NameError for 2-D weights)
            W = cvec(weights)[:, 0]
        # Check whether frequency weights need to be used
        if not freq_weights:
            # If not, normalize weights to sum to n
            W = W * (self.n / W.sum())
        else:
            # If these are frequency weights, update the number of
            # observations (np.int was removed in NumPy >= 1.24)
            self.n = int(W.sum())
        # Get diagonal weights matrix
        # (bug fix: this used to read np.diag(weights), which discarded the
        # normalization above and, for 2-D input, extracted a diagonal
        # instead of building a matrix)
        W = np.diag(W).astype(self.fprec)
    else:
        # If weights were not specified, leave W unset (treated as identity)
        W = None
    # Calculate coefficient vector (weighted least squares when W is given)
    if W is not None:
        self.coef = (
            cvec(scl.lstsq(np.sqrt(W) @ X, np.sqrt(W) @ y)[0])
        ).astype(self.fprec)
    else:
        self.coef = cvec(scl.lstsq(X, y)[0]).astype(self.fprec)
    # Check whether to calculate anything besides the coefficients
    if not coef_only:
        # Get residuals
        U_hat = y - X @ self.coef
        # Get the covariance and standard errors
        sedf, Vdf = self.ols_cov(
            X=X, residuals=U_hat, clusters=clustvar, weights=W,
            df_adjust=df_adjust
        )
        # Get t-statistics
        tdf = self.ols_t()
        # Get confidence intervals
        cidf = self.ols_ci(level=level, df_adjust=df_adjust)
        # Get p-values
        pdf = self.ols_p(df_adjust=df_adjust)
        # Check whether to do a Wald test of joint significance
        if not no_joint:
            walddf = self.wald(**kwargs_wald)
            waldstat = walddf.loc['Wald statistic', :]
            waldp = walddf.loc['p-value', :]
        else:
            waldstat = None
            waldp = None
    else:
        # Otherwise, set all other results to None. (This is important for
        # coef_only=True to provide a speed-up: without pre-setting these,
        # building the result DataFrames below would dominate the run time,
        # making coef_only=True slower than a full fit.)
        sedf = None
        tdf = None
        cidf = None
        pdf = None
        waldstat = None
        waldp = None
        Vdf = None
    # Combine results into a dictionary
    self.est = {
        'coefficients': pd.DataFrame(
            self.coef, index=self.names_X, columns=['Estimated coefficient']
        ),
        'se': sedf,
        'covariance estimator': self.cov_est,
        't': tdf,
        'ci': cidf,
        # NOTE(review): stores the __init__() level rather than the per-call
        # override — kept as-is for backward compatibility; confirm intent
        'level': self.level,
        'p': pdf,
        'wald': waldstat,
        'wald p': waldp,
        'covariance matrix': Vdf
    }
# Define a function to calculate the covariance matrix plus standard errors
def ols_cov(self, X, residuals, clusters=None, weights=None,
covariance_estimator=None, df_adjust=None):
""" Calculate covariance matrix and standard errors
Input
covariance_estimator: String or None, see __init()__; if None, uses the
value provided in __init()__
"""
# Instantiate weights (this function should never be called without W
# having been instantiated in self.__init__(), but just in case)
if weights is None:
W = None
else:
W = weights
if df_adjust is None:
kappa = 0
else:
kappa = df_adjust
# Calculate (X'X)^(-1)
if W is not None:
XXinv = scl.pinv(X.T @ W @ X)
else:
XXinv = scl.pinv(X.T @ X)
# Get residuals and clusters
U_hat = residuals
clustvar = clusters
# Check whether covariance_estimator was changed from the default None
if covariance_estimator is not None:
# If so, set cov_est to the specified covariance estimator
cov_est = covariance_estimator
# Otherwise, check whether the original value provided to __init__() was
# left at the default None, and no cluster IDs were provided
elif (self.cov_est is None) and (clustvar is None):
# If so, use HC1 as the default covariance estimator
cov_est = 'hc1'
# Otherwise, check whether the original value provided to __init__() was
# left at the default None, and cluster IDs were provided
elif (self.cov_est is None) and (clustvar is not None):
# If so, use clustered standard errors
cov_est = 'cluster'
else:
# Otherwise, use the specified covariance estimator
cov_est = self.cov_est
# Check whether clusters were provided, but a non-clustered covariance
# estimator is being used, and the class is set to be talkative
if (
(clustvar is not None)
and (cov_est != 'cluster')
and self.verbose
):
print('\nNote in ols(): Cluster IDs were provided, but a',
'non-clustered covariance estimator is being used')
# Check which covariance estimator to use
#
# Homoskedastic
if cov_est.lower() == 'homoskedastic':
# For the homoskedastic estimator, just calculate the standard
# variance
if W is not None:
self.V_hat = (
(1 / (self.n - self.k - kappa))
* XXinv * (U_hat.T @ W @ U_hat)
)
else:
self.V_hat = (
(1 / (self.n - self.k - kappa)) * XXinv * (U_hat.T @ U_hat)
)
# HC1
elif cov_est.lower() == 'hc1':
# Calculate component of middle part of EHW sandwich,
# S_i = X_i u_i, which makes it very easy to calculate
# sum_i X_i X_i' u_i^2 = S'S
S = (U_hat @ | np.ones(shape=(1, self.k)) | numpy.ones |
import numpy as np
from HamiltonianModule import spin_operators
class QES_1D:
def __init__(self, d, chi, D, l_phys, tau, spin='half', operators=None):
# l_phys: number of sites in the bulk to simulate the bath
self.d = d
self.chi = chi # bath dimension
self.D = D
self.l_phys = l_phys
self.tau = tau
self.gate_phys = np.zeros(0)
self.tensors_gate_phys = [np.zeros(0), np.zeros(0)] # two tensors of the physical gates, by SVD or QR
self.gate_bath = [np.zeros(0), np.zeros(0)] # the two physical-bath Hamiltonians
self.hamilt_bath = [np.zeros(0), np.zeros(0)]
if operators is None:
op_half = spin_operators(spin)
self.operators = [op_half['id'], op_half['sx'], op_half['sy'], op_half['sz'],
op_half['su'], op_half['sd']]
else:
self.operators = operators
def obtain_physical_gate_tensors(self, hamilt):
"""
gate_phys: physical gate (or shifted physical Hamiltonian) is 4-th tensor
0 1
\ /
G
/ \
2 3
"""
self.gate_phys = np.eye(self.d ** 2) - self.tau * hamilt
self.gate_phys = self.gate_phys.reshape(self.d, self.d, self.d, self.d)
u, s, v = np.linalg.svd(self.gate_phys.transpose(0, 2, 1, 3).reshape(self.d**2, self.d**2))
s = np.diag(s ** 0.5)
self.tensors_gate_phys[0] = u.dot(s).reshape(self.d, self.d, self.d**2).transpose(0, 2, 1)
self.tensors_gate_phys[1] = s.dot(v).reshape(self.d**2, self.d, self.d).transpose(1, 0, 2)
def obtain_bath_h(self, env, which, way='shift'):
"""
h_bath is 4-th tensor
0 1
\ /
G
/ \
2 3
"""
if (which is 'left') or (which is 'both'):
self.gate_bath[0] = np.tensordot(env[0], self.tensors_gate_phys[1], ([1], [1]))
self.gate_bath[0] = self.gate_bath[0].transpose(0, 2, 1, 3)
s = self.gate_bath[0].shape
self.hamilt_bath[0] = self.gate_bath[0].reshape(s[0] * s[1], s[2] * s[3]).copy()
lm, u = np.linalg.eigh(self.hamilt_bath[0])
lm /= np.max(lm)
if way is 'shift':
self.hamilt_bath[0] = u.dot(np.diag((np.ones((
s[0] * s[1],)) - lm) / self.tau)).dot(u.T.conj())
else:
self.hamilt_bath[0] = u.dot(np.diag(-np.log(abs(lm))/self.tau)).dot(u.T.conj())
self.hamilt_bath[0] = self.hamilt_bath[0] - np.trace(self.hamilt_bath[0]) * np.eye(
s[0]*s[1]) / (s[0]*s[1])
self.hamilt_bath[0] = (self.hamilt_bath[0] + self.hamilt_bath[0].T.conj())/2
if (which is 'right') or (which is 'both'):
self.gate_bath[1] = np.tensordot(self.tensors_gate_phys[0], env[1], ([1], [1]))
self.gate_bath[1] = self.gate_bath[1].transpose(0, 2, 1, 3)
s = self.gate_bath[1].shape
self.hamilt_bath[1] = self.gate_bath[1].reshape(s[0] * s[1], s[2] * s[3]).copy()
lm, u = np.linalg.eigh(self.hamilt_bath[1])
lm /= | np.max(lm) | numpy.max |
# -*- coding: utf-8 -*-
import csdmpy as cp
import numpy as np
from mrinversion.kernel import T1
from mrinversion.kernel import T2
# Shared fixtures: a 96-point linear acquisition-time axis (20 ms steps) and a
# small monotonic grid of candidate relaxation times for the inverse dimension.
kernel_dimension = cp.Dimension(type="linear", count=96, increment="20 ms")
inverse_kernel_dimension = cp.Dimension(
    type="monotonic", coordinates=["1ms", "10ms", "100ms", "1s", "2s"]
)
def test_T2_kernel():
    """T2 kernel must equal exp(-x/x_inv), normalized by its first column."""
    obj = T2(
        kernel_dimension=kernel_dimension,
        inverse_kernel_dimension=inverse_kernel_dimension,
    )
    kernel = obj.kernel(supersampling=1)
    coords = kernel_dimension.coordinates
    inv_coords = inverse_kernel_dimension.coordinates
    # Outer product of -x with 1/x_inv gives the decay exponents
    expected = np.exp(np.tensordot(-coords, (1 / inv_coords), 0))
    expected /= expected[:, 0].sum()
    assert np.allclose(kernel, expected)
def test_T1_kernel():
    """T1 kernel must equal 1 - exp(-x/x_inv), normalized by its first column."""
    obj = T1(
        kernel_dimension=kernel_dimension,
        inverse_kernel_dimension=inverse_kernel_dimension,
    )
    kernel = obj.kernel(supersampling=1)
    coords = kernel_dimension.coordinates
    inv_coords = inverse_kernel_dimension.coordinates
    # Saturation-recovery form: 1 - exp(-x/x_inv)
    expected = 1 - np.exp(np.tensordot(-coords, (1 / inv_coords), 0))
    expected /= expected[:, 0].sum()
    assert np.allclose(kernel, expected)
#!/usr/bin/env python3
#
# Copyright 2020, Deepwave Digital, Inc.
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from matplotlib import pyplot as plt
import SoapySDR
from SoapySDR import SOAPY_SDR_RX, SOAPY_SDR_CS16, errToStr
############################################################################################
# Settings
############################################################################################
# Data transfer settings
rx_chan = 0 # RX1 = 0, RX2 = 1
N = 16384 # Number of complex samples per transfer
fs = 31.25e6 # Radio sample Rate
freq = 2.4e9 # LO tuning frequency in Hz
use_agc = True # Use or don't use the AGC
timeout_us = int(5e6)
rx_bits = 16 # The AIR-T's ADC is 16 bits
############################################################################################
# Receive Signal
############################################################################################
# Initialize the AIR-T receiver using SoapyAIRT
sdr = SoapySDR.Device(dict(driver="SoapyAIRT")) # Create AIR-T instance
sdr.setSampleRate(SOAPY_SDR_RX, 0, fs) # Set sample rate
sdr.setGainMode(SOAPY_SDR_RX, 0, use_agc) # Set the gain mode
sdr.setFrequency(SOAPY_SDR_RX, 0, freq) # Tune the LO
# Create data buffer and start streaming samples to it
rx_buff = np.empty(2 * N, np.int16) # Create memory buffer for data stream
rx_stream = sdr.setupStream(SOAPY_SDR_RX, SOAPY_SDR_CS16, [rx_chan]) # Setup data stream
sdr.activateStream(rx_stream) # this turns the radio on
# Read the samples from the data buffer
sr = sdr.readStream(rx_stream, [rx_buff], N, timeoutUs=timeout_us)
rc = sr.ret # number of samples read or the error code
assert rc == N, 'Error {}: {}'.format(rc.ret, errToStr(rc.ret))
# Stop streaming
sdr.deactivateStream(rx_stream)
sdr.closeStream(rx_stream)
############################################################################################
# Plot Signal
############################################################################################
# Convert interleaved shorts (received signal) to numpy.complex64 normalized between [-1, 1]
s0 = rx_buff.astype(float) / np.power(2.0, rx_bits-1)
s = (s0[::2] + 1j*s0[1::2])
# Take the fourier transform of the signal and perform FFT Shift
S = np.fft.fftshift( | np.fft.fft(s, N) | numpy.fft.fft |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Module with routines for conductivity problem.
"""
# Computation
import numpy as np
# Plotting
import matplotlib.pyplot as plt
# My stuff
from . import tensors as ten
# %% Bounds
def voigt_iso(v, K):
'''
Voigt bound for isotropic phases with volume fractions v and conductivities
K in dimension d in {2,3}. Returns a scalar.
Example:
v = [0.1,0.2,0.7]
K = [10,2,5]
'''
return np.sum(np.array(v) * np.array(K))
def voigt(v, K):
'''
Voigt bound with volume fractions v and conductivities
K for anisotorpic 3d problems. Returns a matrix.
Example:
v = [0.1,0.2,0.7]
K = [np.array([[10,2,0],[2,5,0],[0,0,8]]),2*np.eye(3),5*np.eye(3)]
'''
return ten.av(v, K)
def reuss_iso(v, K):
'''
Reuss bound for isotropic phases.
Returns a scalar.
'''
return 1 / np.sum(np.array(v) / np.array(K))
def reuss(v, K):
'''
Reuss for anisotropic phases.
Returns a matrix.
'''
return np.linalg.inv(ten.av(v, np.linalg.inv(K)))
def hs_iso(v, K, d=2):
'''
Hashin-Shtrikman bounds for isotropic phases in dimension d.
Torquato (2002), equation (21.23).
'''
Kmin = np.min(K)
Kmins = (d - 1) * Kmin
Kmax = np.max(K)
Kmaxs = (d - 1) * Kmax
low = 1 / np.sum(v * (1 / (K + Kmins))) - Kmins
upp = 1 / np.sum(v * (1 / (K + Kmaxs))) - Kmaxs
return np.array([low, upp])
def rP0(K0):
'''
Return polarization tensor from Willis 1977 for hs in 2/3-dimensional thermal conductivity.
(Willis 1977, i.e., isotropic 2-point statistics, no long-range order, isotropic C0,...)
'''
k0 = K0[0, 0]
d = len(K0)
return | np.eye(d) | numpy.eye |
"""This module defines the replay buffers. A replay buffer is a data structure that stores
transitions coming from the environment, and allows sampling. This module provides a base class
BaseReplayBuffer that defines the minimal interface that any replay buffer implementation should
provide.
Contents:
Base classes:
- BaseReplayBuffer
Implementations:
-FIFOReplayBuffer
"""
import collections
import random
import abc
import numbers
import numpy as np
# --------------------------------------- REPLAY BUFFERS ------------------------------------------#
class BaseReplayBuffer(abc.ABC, collections.UserList):
    """The base class for replay buffers.

    Any derived replay buffer must present an Iterable interface, therefore
    allowing iteration, sampling, etc. Concrete subclasses implement
    remember() and sample().
    """

    def add_iterable(self, iterable):
        # Remember every transition of the iterable, one at a time.
        for transition in iterable:
            self.remember(transition)

    def __add__(self, e):
        # A single transition is remembered directly; anything iterable is
        # unpacked into individual transitions.
        if hasattr(e, '__iter__'):
            return self.add_iterable(e)
        return self.remember(e)

    def append(self, e):
        self.remember(e)

    def extend(self, e):
        return self.add_iterable(e)

    @abc.abstractmethod
    def remember(self, transition, *args, **kwargs):
        """Remember the given transition.

        Args:
            transition (tuple): A transition in the form (s, a, r, s', *info).
                After s' any additional information can be passed.
        """

    @abc.abstractmethod
    def sample(self, size, *args, **kwargs):
        """Sampling operation on the replay buffer.

        Args:
            size (int): Number of transitions to sample.

        Returns:
            A tuple (transitions, info) where transitions is a list of sampled
            transitions, and info is a dictionary with additional information.
        """
class FIFOReplayBuffer(BaseReplayBuffer):
    """A simple fixed-size replay buffer with FIFO eviction.

    Transitions are tuples in the form (s, a, r, s', ...), where anything
    after s' is stored untouched. Once full, remembering a new transition
    evicts the oldest one.
    """

    def __init__(self, maxlen):
        """Instantiate the replay buffer.

        Args:
            maxlen (int): Maximum number of transitions to be stored.
        """
        super().__init__()
        # deque(maxlen=...) implements the FIFO eviction for free
        self.buffer = collections.deque(maxlen=maxlen)

    def remember(self, transition, *args, **kwargs):
        """Store the given transition, evicting the oldest one when full.

        Args:
            transition (list): List in the form [s, a, r, s', ...]. Note that
                s' should be None if the episode ended.
        """
        self.buffer.append(transition)

    def sample(self, size, *args, **kwargs):
        """Sample uniformly, without replacement, from the replay buffer.

        Args:
            size (int): Number of transitions to sample.

        Returns:
            A tuple (transitions, info): transitions is a list with the
            sampled transitions; info is an empty dictionary.

        Raises:
            ValueError: If size exceeds the number of stored transitions.
        """
        stored = len(self.buffer)
        if size > stored:
            raise ValueError(
                'Trying to sample ' + str(size) + ' items when buffer has only ' +
                str(stored) + ' items.'
            )
        chosen = np.random.choice(a=np.arange(stored), size=size, replace=False)
        return [self.buffer[i] for i in chosen], {}  # Empty dict for compatibility
class PrioritizedReplayBuffer(FIFOReplayBuffer):
"""Implementation of a prioritized replay buffer.
This replay buffer stores transitions (s, a, r, s', w) where w is the weight. The sampling is
done by sampling a chunk of the given chunk_size, and performing a weighted sampling on it.
This allows sampling to be done in constant time. The probability of a transition i is given
by w_i^alpha / sum w_k^alpha. If exceeding the maximum length, samples are evicted with a
FIFO policy.
"""
def __init__(self, maxlen, alpha=0.8, chunk_size=2000):
"""Instantiate the replay buffer.
Args:
maxlen (int): Maximum number of transitions that the replay buffer will keep.
alpha (float): Level of prioritization, between 0 and 1.
chunk_size (int): Dimension of the random chunk from which transitions will be sampled.
"""
super().__init__(maxlen=maxlen)
self.alpha = alpha
self.chunk_size = chunk_size
self.avg_td_error = 0
def remember(self, transition, *args, **kwargs):
"""Add the transition to the buffer.
Args:
transition (list): Transition in the form [s, a, r, s', w, ...] where w is the
un-normalized weight of the transition.
"""
assert len(transition) >= 5, 'The given transition must be [s, a, r, s\', w, ...].'
super().remember(transition)
def sample(self, size, *args, **kwargs):
"""Sample the given number of transitions with probability proportional to the weights.
Args:
size (int): Number of transitions to sample.
Returns:
A tuple (transitions, info) where transitions is a list of the sampled transitions and
info is a dictionary {'weights': weights} with weights being the normalized weights
of the sampled transitions.
"""
if size > len(self.buffer):
raise ValueError(
'Trying to sample ' + str(size) + ' items when buffer has only ' +
str(len(self.buffer))
)
chunk = np.random.choice(a=np.arange(len(self.buffer)), size=self.chunk_size, replace=False)
td_errors = np.array([self.buffer[i][4] + 1e-6 for i in chunk])
self.avg_td_error = td_errors.mean() # Update statistics
# Compute probabilities and sample
probabilities = np.power(td_errors, self.alpha)
probabilities /= | np.sum(probabilities) | numpy.sum |
import numpy as np
import warnings
try:
import matplotlib.pyplot as pl
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import colors
from ..utils._legacy import kmeans
# .shape[0] messes up pylint a lot here
# pylint: disable=unsubscriptable-object
def image(shap_values, pixel_values=None, labels=None, width=20, aspect=0.2, hspace=0.2, labelpad=None, show=True):
""" Plots SHAP values for image inputs.
Parameters
----------
shap_values : [numpy.array]
List of arrays of SHAP values. Each array has the shap (# samples x width x height x channels), and the
length of the list is equal to the number of model outputs that are being explained.
pixel_values : numpy.array
Matrix of pixel values (# samples x width x height x channels) for each image. It should be the same
shape as each array in the shap_values list of arrays.
labels : list
List of names for each of the model outputs that are being explained. This list should be the same length
as the shap_values list.
width : float
The width of the produced matplotlib plot.
labelpad : float
How much padding to use around the model output labels.
show : bool
Whether matplotlib.pyplot.show() is called before returning. Setting this to False allows the plot
to be customized further after it has been created.
"""
# support passing an explanation object
if str(type(shap_values)).endswith("Explanation'>"):
shap_exp = shap_values
feature_names = [shap_exp.feature_names]
ind = 0
if len(shap_exp.base_values.shape) == 2:
shap_values = [shap_exp.values[..., i] for i in range(shap_exp.values.shape[-1])]
else:
raise Exception("Number of outputs needs to have support added!! (probably a simple fix)")
if pixel_values is None:
pixel_values = shap_exp.data
if labels is None:
labels = shap_exp.output_names
multi_output = True
if type(shap_values) != list:
multi_output = False
shap_values = [shap_values]
# make sure labels
if labels is not None:
labels = | np.array(labels) | numpy.array |
import numpy as np
import pandas as pd
from keras.layers import Dense, Dropout, Activation, Input
from keras.models import Model
baseInput = pd.read_csv('Inputs_VOPQ.csv')
baseOutput = pd.read_csv('Output_DL2.csv')
# Apagar alguns campos
baseInput = baseInput.drop('V1',axis = 1) # Apaga a coluna
baseInput = baseInput.drop('V2',axis = 1)
baseInput = baseInput.drop('V3',axis = 1)
baseInput = baseInput.drop('V6',axis = 1)
baseInput = baseInput.drop('V8',axis = 1)
baseInput = baseInput.drop('O1',axis = 1) # Apaga a coluna
baseInput = baseInput.drop('O2',axis = 1)
baseInput = baseInput.drop('O3',axis = 1)
baseInput = baseInput.drop('O6',axis = 1)
baseInput = baseInput.drop('O8',axis = 1)
baseInput = baseInput.iloc[:,0:18].values
baseOutput = baseOutput.drop('DL1',axis = 1)
baseOutput = baseOutput.drop('DL2',axis = 1)
baseOutput = baseOutput.drop('DL3',axis = 1)
baseOutput = baseOutput.drop('DL6',axis = 1)
baseOutput = baseOutput.drop('DL8',axis = 1)
# base = base.Output.drop('Developer',axis = 1)
# base = base.dropna(axis = 0) # Apaga todas as linhas que tiverem 'Nan'
# base = base.loc[base['NA_Sales'] > 1] # Pega somente valores maiores que 1 na coluna 'NA_Sales'
# base = base.loc[base['EU_Sales'] > 1] # Pega somente valores maiores que 1 na coluna 'EU_Sales'
# base['Name'].value_counts()
# nome_jogos = base.Name
# base = base.drop('Name',axis = 1)
# previsores = base.iloc[:,[0,1,2,3,7,8,9,10,11]].values
# venda_na = base.iloc[:,4].values
# venda_eu = base.iloc[:,5].values
# venda_jp = base.iloc[:,6].values
DL4 = baseOutput.iloc[:,0].values
DL5 = baseOutput.iloc[:,1].values
DL7 = baseOutput.iloc[:,2].values
DL9 = baseOutput.iloc[:,3].values
DL10 = baseOutput.iloc[:,4].values
DL11 = baseOutput.iloc[:,5].values
DL12 = baseOutput.iloc[:,6].values
DL13 = baseOutput.iloc[:,7].values
DL14 = baseOutput.iloc[:,8].values
#Outra maneira alternativa ao SKlearn
camada_entrada = Input(shape=(18,))
camada_oculta1 = Dense(units = 14, activation='sigmoid')(camada_entrada) # (input+output)/2 -> (28+14)/2
camada_oculta2 = Dense(units = 14, activation='sigmoid')(camada_oculta1)
#Nome camada de saída = topologia de rede neural(parâmetros)(camada a qual está ligada)
out4 = Dense(units = 1, activation='linear')(camada_oculta2)
out5 = Dense(units = 1, activation='linear')(camada_oculta2)
out7 = Dense(units = 1, activation='linear')(camada_oculta2)
out9 = Dense(units = 1, activation='linear')(camada_oculta2)
out10 = Dense(units = 1, activation='linear')(camada_oculta2)
out11 = Dense(units = 1, activation='linear')(camada_oculta2)
out12 = Dense(units = 1, activation='linear')(camada_oculta2)
out13 = Dense(units = 1, activation='linear')(camada_oculta2)
out14 = Dense(units = 1, activation='linear')(camada_oculta2)
regressor = Model(inputs = camada_entrada,
outputs = [out4,out5,out7,out9,out10,out11,out12,out13,out14])
regressor.compile(optimizer = 'adam', loss = 'mse')
regressor.fit(baseInput, [DL4,DL5,DL7,DL9,DL10,DL11,DL12,DL13,DL14],
epochs = 15000, batch_size = 20) #Batch_size = quantidade de testes antes de atualizar os pesos
out4, out5, out7, out9, out10, out11, out12, out13, out14 = regressor.predict(baseInput)
#Predicao = regressor.predict(baseInput)
#Predicao = pd.DataFrame((out4, out5, out7, out9, out10, out11, out12, out13, out14), columns=['out4', 'out5', 'out7', 'out9', 'out10', 'out11', 'out12', 'out13', 'out14'])
Predicao = | np.concatenate((out4, out5, out7, out9, out10, out11, out12, out13, out14),axis=1) | numpy.concatenate |
# pylint: disable=unsubscriptable-object
import os
import argparse
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_iris
def quantize_data(header, dtypes, X):
    """Bucketize integer feature columns into 32-quantile bin indices.

    Columns whose name starts with 'f' and whose declared dtype is np.int32
    are replaced, in place, by the index of the quantile bin each value
    falls into; NaNs map to the sentinel bin 33. Other columns are left
    untouched.

    Returns:
        A structured numpy array with one field per entry of `header`,
        typed according to `dtypes`.
    """
    quantile_points = np.arange(33) / 32  # 0, 1/32, ..., 1
    for col, (name, dtype) in enumerate(zip(header, dtypes)):
        if not (name.startswith('f') and dtype == np.int32):
            continue
        values = X[:, col].copy()
        is_nan = np.isnan(values)
        finite = values[~is_nan]
        edges = np.unique(np.quantile(finite, quantile_points))
        X[:, col][~is_nan] = np.digitize(finite, edges, right=True)
        X[:, col][is_nan] = 33  # sentinel bin for missing values
    record_dtype = list(zip(header, dtypes))
    return np.asarray([tuple(row) for row in X], dtype=record_dtype)
def write_tfrecord_data(filename, data, header, dtypes):
    """Serialize rows of a structured array to a TFRecord file.

    Each row becomes a tf.train.Example whose features are keyed by
    `header`; np.int32 columns go into int64_list, everything else into
    float_list.
    """
    # Bug fix: the original never closed the TFRecordWriter, so buffered
    # records could be lost. Using it as a context manager guarantees the
    # file is flushed and closed even if serialization raises.
    with tf.io.TFRecordWriter(filename) as fout:
        for i in range(data.shape[0]):
            example = tf.train.Example()
            for h, d, x in zip(header, dtypes, data[i]):
                if d == np.int32:
                    example.features.feature[h].int64_list.value.append(x)
                else:
                    example.features.feature[h].float_list.value.append(x)
            fout.write(example.SerializeToString())
def write_data(output_type, filename, X, y, role, verify_example_ids):
    """Split features by federated role and write them as CSV or TFRecord.

    Args:
        output_type: 'tfrecord' writes via write_tfrecord_data; anything
            else writes CSV with np.savetxt.
        filename: Output path.
        X: Feature matrix (samples x features).
        y: Label column (samples x 1).
        role: 'leader' gets the first half of the features plus labels,
            'follower' the second half without labels; any other value gets
            the full feature set plus labels.
        verify_example_ids: If True, prepend a sequential example_id column
            so the parties can align rows.
    """
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the equivalent concrete type (np.float aliased builtin
    # float, which is a 64-bit double).
    if role == 'leader':
        data = np.concatenate((X[:, :X.shape[1]//2], y), axis=1)
        N = data.shape[1]-1
        header = ['f%05d'%i for i in range(N)] + ['label']
        dtypes = [np.float64]*(N//2) + [np.int32]*(N - N//2) + [np.int32]
    elif role == 'follower':
        data = X[:, X.shape[1]//2:]
        N = data.shape[1]
        header = ['f%05d'%i for i in range(N)]
        dtypes = [np.float64]*(N//2) + [np.int32]*(N - N//2)
    else:
        data = np.concatenate((X, y), axis=1)
        N = data.shape[1]-1
        header = ['f%05d'%i for i in range(N)] + ['label']
        dtypes = [np.float64]*(N//2) + [np.int32]*(N - N//2) + [np.int32]
    if verify_example_ids:
        data = np.concatenate(
            [[[i] for i in range(data.shape[0])], data], axis=1)
        header = ['example_id'] + header
        dtypes = [np.int32] + dtypes
    data = quantize_data(header, dtypes, data)
    if output_type == 'tfrecord':
        write_tfrecord_data(filename, data, header, dtypes)
    else:
        np.savetxt(
            filename,
            data,
            delimiter=',',
            header=','.join(header),
            fmt=['%d' if i == np.int32 else '%f' for i in dtypes],
            comments='')
def process_mnist(X, y):
    """Flatten MNIST images and keep only digits 2 and 3 as a binary task.

    Returns the flattened images of the kept samples and, as labels, column
    vectors that are 1 for digit 3 and 0 for digit 2.
    """
    flat = X.reshape(X.shape[0], -1)
    keep = [i for i, label in enumerate(y) if label in (2, 3)]
    X_out = np.asarray([flat[i] for i in keep])
    y_out = np.asarray([[y[i] == 3] for i in keep], dtype=np.int32)
    return X_out, y_out
def make_data(args):
if args.dataset == 'mnist':
(x_train, y_train), (x_test, y_test) = \
tf.keras.datasets.mnist.load_data()
x_train, y_train = process_mnist(x_train, y_train)
x_test, y_test = process_mnist(x_test, y_test)
else:
data = load_iris()
x_train = x_test = data.data
y_train = y_test = | np.minimum(data.target, 1) | numpy.minimum |
import numpy as np
from stl import mesh
import cv2
def convert_image(gray_img, out_dir, invert=False):
"""
takes image and converts it to STL based off of pixel value
@param {numpy image array} gray_img - numpy array with gray image
@param {string} out_dir - The file for the stl to be saved to
@param {boolean} invert - invert the heights. dark and light switches heights
@returns {None}
"""
#creates vertices from image with the height of the cube being based off of the pixels height
vertices_array = []
faces_array = []
for x,row in enumerate(gray_img):
for y,pixel in enumerate(row):
if pixel > 0:
if invert:
vertices = np.array([\
[x, y, 0],
[x+1, y, 0],
[x+1, y+1, 0],
[x, y+1, 0],
[x, y, 255-pixel],
[x+1, y, 255-pixel],
[x+1, y+1, 255-pixel],
[x, y+1, 255-pixel],
])
else:
vertices = np.array([\
[x, y, 0],
[x+1, y, 0],
[x+1, y+1, 0],
[x, y+1, 0],
[x, y, pixel],
[x+1, y, pixel],
[x+1, y+1, pixel],
[x, y+1, pixel],
])
vertices_array.append(vertices)
faces = np.array([[0,3,1],[1,3,2],[0,4,7],[0,7,3],[4,5,6],[4,6,7],[5,1,2],[5,2,6],[2,3,6],[3,7,6],[0,1,5],[0,5,4]])
#creates meshes from vertices
meshes = []
for vertice in vertices_array:
cube = mesh.Mesh( | np.zeros(faces.shape[0], dtype=mesh.Mesh.dtype) | numpy.zeros |
'''This script contains all of the functions used in our study'''
from __future__ import print_function
import math
import numpy as np
import scipy as sp
from scipy import special as spsp
from scipy import sparse as sparse
from scipy import stats as stats
from scipy import optimize as opt
from scipy import interpolate as interpolate
import time
import mpmath
import random
def poisson_loglh(data, lmbd):
    """Return the Poisson log likelihood of rate `lmbd` given sample `data`.

    Parameters
    ----------
    data : list
        sample dataset
    lmbd : float
        Poisson rate parameter

    Returns
    -------
    float
        log likelihood of lmbd given data
    """
    return sum(np.log(stats.poisson.pmf(obs, lmbd)) for obs in data)
def geo_loglh(data, lmbd):
    """Return the geometric log likelihood of mean `lmbd` given sample `data`.

    The distribution is parameterized by success probability 1/(lmbd+1) and
    shifted to start at 0 (loc=-1), so lmbd is its mean.
    """
    return sum(np.log(stats.geom.pmf(obs, 1/(lmbd+1), -1)) for obs in data)
def neg_bin_loglh_theta(data, lmbd, theta):
    """Return the negative binomial log likelihood of (lmbd, theta) given data.

    Parameterized by mean lmbd and overdispersion theta, i.e.
    nbinom(n=lmbd/theta, p=1/(theta+1)).
    """
    return sum(np.log(stats.nbinom.pmf(obs, lmbd/theta, 1/(theta+1)))
               for obs in data)
def get_theta_mle(data, theta_0):
    """Return the MLE of the negative binomial overdispersion theta.

    The mean is pinned to the sample mean; only theta is optimized,
    starting from theta_0 and constrained to [1e-6, 50].
    """
    sample_mean = np.mean(data)

    def negative_llh(theta):
        return -neg_bin_loglh_theta(data, sample_mean, theta)

    result = sp.optimize.minimize(negative_llh, [theta_0], bounds=((1e-6, 50),))
    return result.x[0]
def beta_poisson_pmf(x, lmbd, Phi, N):
    """Evaluate the beta-Poisson pmf at x (an int or a sequence of ints).

    Uses the confluent hypergeometric representation
    P(x) = (N^x / x!) * [prod_{m<x} (m+Phi*lmbd)/(m+Phi*N)]
           * 1F1(x+Phi*lmbd, x+Phi*N, -N),
    with the prefactors accumulated term by term for numerical stability.
    """
    def _single(k):
        p = spsp.hyp1f1(k + Phi*lmbd, k + Phi*N, -N)
        for n in range(1, k+1):  # accumulate N^k / k!
            p = (N/n) * p
        for m in range(k):  # accumulate the ratio of gamma terms
            p = ((m + Phi*lmbd)/(m + Phi*N)) * p
        return p

    if type(x) == int:
        return _single(x)
    return [_single(k) for k in x]
# Vectorized, arbitrary-precision 1F1 fallback via mpmath; used by
# beta_poisson_loglh when x + phi*N >= 50 (presumably where scipy's
# hyp1f1 loses accuracy — NOTE(review): confirm the threshold rationale).
hyp1f1_alt=np.frompyfunc(mpmath.hyp1f1,3,1)
def beta_poisson_loglh(data, lmbd, phi, N):
    """Return the beta-Poisson log likelihood of (lmbd, phi, N) given data.

    The hypergeometric factor is evaluated with scipy when x + phi*N is
    small, and with the arbitrary-precision mpmath fallback otherwise.
    """
    loggamma = spsp.loggamma
    llh = 0
    for x in data:
        llh += (x*np.log(N) - np.real(loggamma(x+1)) + np.real(loggamma(phi*N))
                + np.real(loggamma(x+phi*lmbd)) - np.real(loggamma(x+phi*N))
                - np.real(loggamma(phi*lmbd)))
        if x+phi*N < 50:
            llh += np.log(spsp.hyp1f1(x+phi*lmbd, x+phi*N, -N))
        else:
            llh += np.log(float(hyp1f1_alt(x+phi*lmbd, x+phi*N, -N)))
    return llh
def neg_bin_loglh(data, lmbd, phi):
    """Return the negative binomial log likelihood parameterized by (lmbd, phi).

    This is nbinom(n=lmbd*phi, p=phi/(phi+1)); get_phi_and_N_mles uses it
    as the 1/N = 0 limiting case of the beta-Poisson model.
    """
    return sum(np.log(stats.nbinom.pmf(obs, lmbd*phi, phi/(phi+1)))
               for obs in data)
def get_phi_and_N_mles(data, phi_0, N_0):
    """Return MLEs (phi, 1/N) for the beta-Poisson model, mean fixed at the sample mean.

    The second coordinate is optimized as 1/N so that 1/N = 0 (the negative
    binomial limit) lies inside the feasible region; tiny values of 1/N fall
    back to the negative binomial likelihood.
    """
    sample_mean = np.mean(data)

    def negative_llh(params):
        phi = params[0]
        if params[1] > 0.1e-3:
            return -beta_poisson_loglh(data, sample_mean, phi, 1/params[1])
        return -neg_bin_loglh(data, sample_mean, phi)

    result = sp.optimize.minimize(
        negative_llh, [phi_0, N_0],
        bounds=((1e-6, 50), (0, 1/sample_mean)))
    if result.x[1] < 0:
        result.x[1] = 0
    return result.x[0], result.x[1]
def zip_pmf(x, lmbd, sigma):
    """Evaluate the zero-inflated Poisson pmf at x (an int or an array).

    With probability sigma the count is an inflated zero, otherwise it is
    drawn from Poisson(lmbd).
    """
    if type(x) == int:
        inflation = sigma if x == 0 else 0.0
    else:
        inflation = sigma * np.equal(x, np.zeros(len(x)))
    return inflation + (1-sigma)*stats.poisson.pmf(x, lmbd)
def zip_loglh(data, lmbd, sigma):
    """Return the zero-inflated Poisson log likelihood of (lmbd, sigma).

    Zeros mix the inflation mass with the Poisson mass at zero; positive
    counts only receive the deflated Poisson mass.
    """
    llh = 0
    for obs in data:
        if obs == 0:
            llh += np.log(sigma + (1-sigma)*np.exp(-lmbd))
        else:
            llh += np.log(1-sigma) + np.log(stats.poisson.pmf(obs, lmbd))
    return llh
def get_zip_mles(data, lmbd_0, sigma_0):
    """Return MLEs (lambda, sigma) of the zero-inflated Poisson model.

    lambda is constrained to [mean(data), 50] and sigma to [0, 1).
    """
    def negative_llh(params):
        return -zip_loglh(data, params[0], params[1])

    result = sp.optimize.minimize(
        negative_llh, [lmbd_0, sigma_0],
        bounds=((np.mean(data), 50), (0, 1-1e-6)))
    return result.x[0], result.x[1]
def beta_poisson_pgf(s, lmbd, phi, N):
    """Evaluate the beta-Poisson probability generating function at s.

    Accepts a scalar or a sequence; sequences yield a list of values.
    """
    if np.size(np.asarray(s)) == 1:
        return spsp.hyp1f1(lmbd*phi, N*phi, N*(s-1))
    return [spsp.hyp1f1(lmbd*phi, N*phi, N*(si-1)) for si in s]
def poisson_pgf(s, lmbd):
    """Evaluate the Poisson pgf exp(lmbd*(s-1)) at a scalar or sequence s."""
    if np.size(np.asarray(s)) == 1:
        return np.exp(lmbd*(s-1))
    return [np.exp(lmbd*(si-1)) for si in s]
def geom_pgf(s, lmbd):
    """Evaluate the geometric pgf 1/(lmbd+1-lmbd*s) at a scalar or sequence s."""
    if np.size(np.asarray(s)) == 1:
        return 1/(lmbd+1-lmbd*s)
    return [1/(lmbd+1-lmbd*si) for si in s]
def neg_bin_pgf(s, lmbd, theta):
    """Evaluate the negative binomial pgf (theta+1-s*theta)**(-lmbd/theta)."""
    if np.size(np.asarray(s)) == 1:
        return (theta+1-s*theta)**(-lmbd/theta)
    return [(theta+1-si*theta)**(-lmbd/theta) for si in s]
def zip_pgf(s, lmbd, sigma):
    """Evaluate the zero-inflated Poisson pgf sigma+(1-sigma)*exp(lmbd*(s-1))."""
    if np.size(np.asarray(s)) == 1:
        return sigma + (1-sigma)*np.exp(lmbd*(s-1))
    return [sigma + (1-sigma)*np.exp(lmbd*(si-1)) for si in s]
def beta_poisson_extinction_prob(lmbd, phi, N):
    """Return the extinction probability of the beta-Poisson branching process.

    Subcritical or critical processes (lmbd <= 1) go extinct almost surely;
    otherwise the probability is the fixed point of the pgf in [0, 1).
    """
    if lmbd <= 1:
        return 1

    def fixed_point_gap(s):
        return beta_poisson_pgf(s, lmbd, phi, N) - s

    # NOTE(review): this bracket stops at 1-1e-4, unlike the 1-1e-6 used by
    # the other extinction-probability functions — confirm before tightening.
    return opt.brentq(fixed_point_gap, 0, 1-1e-4)
def poisson_extinction_prob(lmbd):
    """Return the extinction probability of the Poisson branching process.

    1 for lmbd <= 1; otherwise the fixed point of the pgf in [0, 1).
    """
    if lmbd <= 1:
        return 1

    def fixed_point_gap(s):
        return poisson_pgf(s, lmbd) - s

    return opt.brentq(fixed_point_gap, 0, 1-1e-6)
def geom_extinction_prob(lmbd):
    """Return the extinction probability of the geometric branching process.

    1 for lmbd <= 1; otherwise the fixed point of the pgf in [0, 1).
    """
    if lmbd <= 1:
        return 1

    def fixed_point_gap(s):
        return geom_pgf(s, lmbd) - s

    return opt.brentq(fixed_point_gap, 0, 1-1e-6)
def neg_bin_extinction_prob(lmbd, theta):
    """Return the extinction probability of the negative binomial branching process.

    1 for lmbd <= 1; otherwise the fixed point of the pgf in [0, 1).
    """
    if lmbd <= 1:
        return 1

    def fixed_point_gap(s):
        return neg_bin_pgf(s, lmbd, theta) - s

    return opt.brentq(fixed_point_gap, 0, 1-1e-6)
def zip_extinction_prob(lmbd, sigma):
    """Return the extinction probability of the zero-inflated Poisson process.

    The effective reproduction number is (1-sigma)*lmbd; when it is <= 1 the
    process dies out almost surely, otherwise the probability is the fixed
    point of the pgf in [0, 1).
    """
    if (1-sigma)*lmbd <= 1:
        return 1

    def fixed_point_gap(s):
        return zip_pgf(s, lmbd, sigma) - s

    return opt.brentq(fixed_point_gap, 0, 1-1e-6)
def empirical_loglh(data):
    """Return the log likelihood of `data` under its own empirical pmf.

    This is the maximum achievable log likelihood for any model and serves
    as an upper bound when comparing fits.

    NOTE(review): indexing the histogram by the raw value assumes the data
    are non-negative integers starting at 0 — confirm for other inputs.
    """
    counts, _ = np.histogram(data, max(data)+1)
    empirical_pmf = counts/len(data)
    return sum(np.log(empirical_pmf[obs]) for obs in data)
def get_lambda_and_phi_mles(data, lmbd_0, phi_0, N_emp):
    """Return MLEs (lambda, phi) of the beta-Poisson model with N fixed at N_emp.

    lambda is constrained to [1e-6, 10] and phi to [1e-6, 50].
    """
    def negative_llh(params):
        return -beta_poisson_loglh(data, params[0], params[1], N_emp)

    result = sp.optimize.minimize(
        negative_llh, [lmbd_0, phi_0],
        bounds=((1e-6, 10), (1e-6, 50)))
    return result.x[0], result.x[1]
def generate_mle_dict(data,
                      theta_0,
                      phi_0,
                      N_0,
                      lmbd_0,
                      sigma_0):
    """Fit every candidate offspring distribution and collect the MLEs.

    Parameters
    ----------
    data : list
        sample data to fit to
    theta_0 : float
        initial estimate of the negative binomial overdispersion theta
    phi_0 : float
        initial estimate of beta-Poisson parameter Phi
    N_0 : float
        initial estimate of the beta-Poisson parameter 1/N
    lmbd_0 : float
        initial estimate of the ZIP baseline lambda
    sigma_0 : float
        initial estimate of the ZIP inflation parameter sigma

    Returns
    -------
    mle_dict : dict
        maximum likelihood estimates of the parameters of each model.
    """
    # Bug fix: the supplied initial guesses were previously ignored in
    # favour of hard-coded starting points (1.5, 0.5, 1/2, ...).
    theta_mle = get_theta_mle(data, theta_0)
    phi_mle, N_inv_mle = get_phi_and_N_mles(data, phi_0, N_0)
    lmbd_mle, sigma_mle = get_zip_mles(data, lmbd_0, sigma_0)
    sample_mean = np.mean(data)
    mle_dict = {
        'poisson': sample_mean,
        'geometric': sample_mean,
        'negative binomial': [sample_mean, theta_mle],
        'zip': [lmbd_mle, sigma_mle],
        'beta-Poisson': [sample_mean, phi_mle, N_inv_mle]
    }
    return mle_dict
def poisson_mle_grid(data, interval, points):
'''
Calculate a confidence interval for the MLE of the Poisson distribution
given some data using a grid calculation.
Parameters
----------
data : list
sample dataset
interval : list
list containing the lower and upper bounds on which to perform the
grid calculation
points : int
number of points to use in the grid calculation
Returns
-------
lmbd : array
value of lambda over grid
llh : array
log likelihood of lambda values over grid
mle : float
maximum likelihood estimate of lambda
ci : list
95% confidence interval for lambda given data
'''
lmbd=np.linspace(interval[0],interval[1],points)
llh=np.zeros(points)
for x in data:
llh=llh+np.log(stats.poisson.pmf(x,lmbd))
mle_loc=np.argmax(llh)
mle=lmbd[mle_loc]
lh_normed=np.exp(llh)/np.sum( | np.exp(llh) | numpy.exp |
import sys
import time
import visa
import numpy as np
import matplotlib.pyplot as plt
# Instrument address: the first CLI argument overrides the default IP.
TCP_HOST = '169.254.70.102' if len(sys.argv) < 2 else sys.argv[1]
TCP_PORT = 5000
# Open a raw SCPI-over-TCP session (pyvisa with the pyvisa-py backend).
rm = visa.ResourceManager('@py')
inst = rm.open_resource(f'TCPIP0::{TCP_HOST}::{TCP_PORT}::SOCKET')
inst.write_termination = '\r\n'
inst.read_termination = '\r\n'
BUFFER_SIZE = 2**14 # samples
BASE_SAMPLERATE = 125e6 # samples per second
# Decimation factors that acquire_data may write to ACQ:DEC.
DECIMATIONS = np.array([1, 8, 64, 1024, 8192, 65536])
def acquire_data(f, amp=1, delay=8192, inst=inst, quiet=False):
    """Emit a sine burst at frequency ``f`` and capture both scope channels.

    Drives the signal generator via SCPI, arms the oscilloscope, triggers
    the acquisition from the generator, and reads back both input buffers.
    The on-board LEDs indicate progress (LED0 configure, LED1 armed,
    LED2 readout).

    Args:
        f: Stimulus frequency in Hz.
        amp: Output amplitude in volts.
        delay: Trigger delay in samples (ACQ:TRIG:DLY).
        inst: pyvisa instrument handle (defaults to the module-level one).
        quiet: Suppress trigger-status progress output when True.

    Returns:
        Tuple ``(samplerate, data1, data2)``: the effective sample rate in
        samples/s after decimation, and the two captured channels as
        float arrays.
    """
    # Choose decimation factor where f is oversampled by just about a
    # factor of 100 (target post-decimation sample rate ~ 100 * f).
    targetdecimation = BASE_SAMPLERATE / (f * 100)
    decimation = DECIMATIONS[DECIMATIONS <= targetdecimation].max(initial=1)
    buffertime = BUFFER_SIZE / (BASE_SAMPLERATE / decimation) # s
    # Enough burst cycles to span the whole acquisition buffer, plus headroom.
    ncyc = 10 + int(f * buffertime)
    #print(f, targetdecimation, decimation, buffertime, ncyc)
    inst.write('GEN:RST') # reset function generator
    inst.write('ACQ:RST') # reset analog input
    inst.write('DIG:PIN LED0,1')  # LED0 on: configuring
    # setup function generator
    inst.write('SOUR1:FUNC SINE')
    inst.write('SOUR1:FREQ:FIX {:f}'.format(f))
    inst.write('SOUR1:VOLT {:f}'.format(amp))
    # inst.write('SOUR1:BURS:STAT OFF')
    # print(inst.query('*IDN?'))
    inst.write('SOUR1:BURS:NCYC {:d}'.format(ncyc))
    inst.write('OUTPUT1:STATE ON')
    # setup oscilloscope
    inst.write('ACQ:DEC {:f}'.format(decimation))
    inst.write('ACQ:TRIG:LEV {:f}'.format(amp/10))  # trigger at 10% of amplitude
    inst.write('ACQ:TRIG:DLY {:d}'.format(delay))
    inst.write('ACQ:START')
    # wait for buffer to fill (sleep slightly longer than the pre-trigger
    # fill time, hence the 1.1 safety factor)
    if BUFFER_SIZE/2 - delay > 0:
        time.sleep(1.1 * (BUFFER_SIZE/2 - delay)
                   / (BASE_SAMPLERATE / decimation))
    inst.write('DIG:PIN LED0,0')
    inst.write('DIG:PIN LED1,1')  # LED1 on: armed, waiting for trigger
    # trigger from function generator
    inst.write('ACQ:TRIG AWG_PE')
    inst.write('SOUR1:TRIG:IMM')
    # wait for trigger ('TD' = triggered)
    while True:
        trig_rsp = inst.query('ACQ:TRIG:STAT?')
        if not quiet:
            sys.stdout.write(trig_rsp + ' ')
            sys.stdout.flush()
        if trig_rsp == 'TD':
            if not quiet:
                sys.stdout.write('\n')
            break
    inst.write('DIG:PIN LED1,0')
    inst.write('DIG:PIN LED2,1')  # LED2 on: reading out
    # read data from IN1 (returned as '{v1,v2,...}')
    inst.write('ACQ:SOUR1:DATA?')
    data1 = inst.read()
    assert data1[0] == '{' and data1[-1] == '}'
    data1 = np.array(list(float(v) for v in data1[1:-1].split(',')))
    # read data from IN2
    inst.write('ACQ:SOUR2:DATA?')
    data2 = inst.read()
    assert data2[0] == '{' and data2[-1] == '}'
    data2 = np.array(list(float(v) for v in data2[1:-1].split(',')))
    inst.write('GEN:RST')
    inst.write('ACQ:RST')
    inst.write('DIG:PIN LED2,0')
    return BASE_SAMPLERATE/decimation, data1, data2
def demodulate(signal, f, samplerate):
# cut to multiple of cycles
ncycles = np.floor(len(signal) / (samplerate/f))
nsamples = int(ncycles * (samplerate/f))
dat = signal[:nsamples]
# remove mean
dat = dat - np.mean(dat)
# reference oscillator
refc = np.cos(np.arange(len(dat)) * 2*np.pi * f/samplerate)
refs = np.sin(np.arange(len(dat)) * 2*np.pi * f/samplerate)
I = np.trapz(dat * refc) # noqa
Q = np.trapz(dat * refs)
A = np.sqrt(I**2 + Q**2) * 2 / len(dat)
φ = n | p.arctan2(-Q, I) | numpy.arctan2 |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import gradient_checker
from caffe2.python import workspace
def test_forward_and_gradient():
X = np.random.randn(1, 7, 56, 56).astype(np.float32)
Y = np.random.randn(1, 7, 56, 56).astype(np.float32)
Weights = np.random.randn(1, 7).astype(np.float32)
scale = | np.random.random() | numpy.random.random |
import os
import argparse
import time
import numpy as np
import torch
from tqdm import tqdm
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from datasets.mvtec import MVTecDataset
from models.resnet_backbone import modified_resnet18
from utils.util import time_string, convert_secs2time, AverageMeter
from utils.functions import cal_anomaly_maps, cal_loss
from utils.visualization import plt_fig
class STPM():
def __init__(self, args):
self.device = args.device
self.data_path = args.data_path
self.obj = args.obj
self.img_resize = args.img_resize
self.img_cropsize = args.img_cropsize
self.validation_ratio = args.validation_ratio
self.num_epochs = args.num_epochs
self.lr = args.lr
self.batch_size = args.batch_size
self.vis = args.vis
self.model_dir = args.model_dir
self.img_dir = args.img_dir
self.load_model()
self.load_dataset()
# self.criterion = torch.nn.MSELoss(reduction='sum')
self.optimizer = torch.optim.SGD(self.model_s.parameters(), lr=self.lr, momentum=0.9, weight_decay=0.0001)
def load_dataset(self):
kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
train_dataset = MVTecDataset(self.data_path, class_name=self.obj, is_train=True, resize=self.img_resize, cropsize=self.img_cropsize)
img_nums = len(train_dataset)
valid_num = int(img_nums * self.validation_ratio)
train_num = img_nums - valid_num
train_data, val_data = torch.utils.data.random_split(train_dataset, [train_num, valid_num])
self.train_loader = torch.utils.data.DataLoader(train_data, batch_size=self.batch_size, shuffle=True, **kwargs)
self.val_loader = torch.utils.data.DataLoader(val_data, batch_size=32, shuffle=False, **kwargs)
def load_model(self):
self.model_t = modified_resnet18().to(self.device)
self.model_s = modified_resnet18(pretrained=False).to(self.device)
for param in self.model_t.parameters():
param.requires_grad = False
self.model_t.eval()
def train(self):
self.model_s.train()
best_score = None
start_time = time.time()
epoch_time = AverageMeter()
for epoch in range(1, self.num_epochs+1):
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * ((self.num_epochs+1) - epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print('{:3d}/{:3d} ----- [{:s}] {:s}'.format(epoch, self.num_epochs, time_string(), need_time))
losses = AverageMeter()
for (data, _, _) in tqdm(self.train_loader):
data = data.to(self.device)
self.optimizer.zero_grad()
with torch.set_grad_enabled(True):
features_t = self.model_t(data)
features_s = self.model_s(data)
loss = cal_loss(features_s, features_t)
losses.update(loss.sum().item(), data.size(0))
loss.backward()
self.optimizer.step()
print('Train Epoch: {} loss: {:.6f}'.format(epoch, losses.avg))
val_loss = self.val(epoch)
if best_score is None:
best_score = val_loss
self.save_checkpoint()
elif val_loss < best_score:
best_score = val_loss
self.save_checkpoint()
epoch_time.update(time.time() - start_time)
start_time = time.time()
print('Training end.')
def val(self, epoch):
self.model_s.eval()
losses = AverageMeter()
for (data, _, _) in tqdm(self.val_loader):
data = data.to(self.device)
with torch.set_grad_enabled(False):
features_t = self.model_t(data)
features_s = self.model_s(data)
loss = cal_loss(features_s, features_t)
losses.update(loss.item(), data.size(0))
print('Val Epoch: {} loss: {:.6f}'.format(epoch, losses.avg))
return losses.avg
def save_checkpoint(self):
print('Save model !!!')
state = {'model':self.model_s.state_dict()}
torch.save(state, os.path.join(self.model_dir, 'model_s.pth'))
def test(self):
try:
checkpoint = torch.load(os.path.join(self.model_dir, 'model_s.pth'))
except:
raise Exception('Check saved model path.')
self.model_s.load_state_dict(checkpoint['model'])
self.model_s.eval()
kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
test_dataset = MVTecDataset(self.data_path, class_name=self.obj, is_train=False, resize=self.img_resize, cropsize=self.img_cropsize)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, **kwargs)
scores = []
test_imgs = []
gt_list = []
gt_mask_list = []
print('Testing')
for (data, label, mask) in tqdm(test_loader):
test_imgs.extend(data.cpu().numpy())
gt_list.extend(label.cpu().numpy())
gt_mask_list.extend(mask.squeeze().cpu().numpy())
data = data.to(self.device)
with torch.set_grad_enabled(False):
features_t = self.model_t(data)
features_s = self.model_s(data)
score = cal_anomaly_maps(features_s, features_t, self.img_cropsize)
scores.extend(score)
scores = np.asarray(scores)
max_anomaly_score = scores.max()
min_anomaly_score = scores.min()
scores = (scores - min_anomaly_score) / (max_anomaly_score - min_anomaly_score)
img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
gt_list = np.asarray(gt_list)
img_roc_auc = roc_auc_score(gt_list, img_scores)
print('image ROCAUC: %.3f' % (img_roc_auc))
precision, recall, thresholds = precision_recall_curve(gt_list.flatten(), img_scores.flatten())
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
cls_threshold = thresholds[np.argmax(f1)]
gt_mask = | np.asarray(gt_mask_list) | numpy.asarray |
import shutil
import util.util as util_
import os
import cv2
import open3d as o3d
import pickle
import numpy as np
from scipy.optimize import linear_sum_assignment
import trimesh
from skimage import measure
import scipy
from sklearn.neighbors import KDTree
from scipy.ndimage.measurements import label
import data_augmentation
from genre.voxelization import voxel
import traceback
from genre.util import util_sph
from scipy import stats
from dm_control.mujoco.engine import Camera
from trajopt.mujoco_utils import add_object_to_mujoco, remove_objects_from_mujoco, get_mesh_list, compute_mujoco_int_transform
mesh_level=0.5
def chamfer_distance(pcd_1, pcd_2):
    """Symmetric Chamfer distance between two point clouds.

    Each term is the mean Euclidean distance from the points of one cloud to
    their nearest neighbours in the other cloud.
    @param pcd_1: [N x D] array of points
    @param pcd_2: [M x D] array of points
    @return: scalar Chamfer distance (0 for identical clouds)
    """
    # Use scipy's cKDTree instead of sklearn's KDTree: scipy is already a
    # dependency of this module and the query semantics are identical here.
    from scipy.spatial import cKDTree
    dists_1_to_2, _ = cKDTree(pcd_2).query(pcd_1)
    dists_2_to_1, _ = cKDTree(pcd_1).query(pcd_2)
    return np.mean(dists_1_to_2) + np.mean(dists_2_to_1)
#return outer shell of voxel shape
def hollow_dense_pointcloud(ptcld):
    """Zero out fully-interior voxels, keeping only the outer shell."""
    # A voxel whose full 3x3x3 neighbourhood sums to 27 is completely interior.
    neighbour_count = scipy.ndimage.convolve(ptcld, np.ones((3, 3, 3)))
    return np.where(neighbour_count < 27, ptcld, 0)
def compute_xyz(depth_img, camera_params):
    """ Compute ordered point cloud from depth image and camera parameters
        @param depth_img: a [H x W] numpy array of depth values in meters
        @param camera_params: a dictionary with parameters of the camera used
        @return: a [H x W x 3] numpy array of camera-frame (x, y, z) coordinates
    """
    height = camera_params['img_height']
    width = camera_params['img_width']
    # Focal lengths: taken directly when present, otherwise derived from the
    # simulated camera's field of view and near plane.
    if 'fx' in camera_params and 'fy' in camera_params:
        fx, fy = camera_params['fx'], camera_params['fy']
    else:  # simulated data
        aspect_ratio = width / height
        e = 1 / (np.tan(np.radians(camera_params['fov'] / 2.)))
        t = camera_params['near'] / e
        b = -t
        r = t * aspect_ratio
        l = -r
        alpha = width / (r - l)  # pixels per meter
        focal_length = camera_params['near'] * alpha  # focal length of virtual (frustum) camera
        fx = fy = focal_length
    # Principal point: explicit offsets when given, otherwise the image centre.
    if 'x_offset' in camera_params and 'y_offset' in camera_params:
        x_offset, y_offset = camera_params['x_offset'], camera_params['y_offset']
    else:  # simulated data
        x_offset, y_offset = width / 2, height / 2
    indices = util_.build_matrix_of_indices(height, width)
    # Pixel indices start at the top-left corner; the pinhole equations below
    # assume a bottom-left origin, so flip the row indices.
    indices[..., 0] = np.flipud(indices[..., 0])
    z_e = depth_img
    x_e = (indices[..., 1] - x_offset) * z_e / fx
    y_e = (indices[..., 0] - y_offset) * z_e / fy
    return np.stack([x_e, y_e, z_e], axis=-1)  # Shape: [H x W x 3]
upsample=1
# Index grids over the (possibly upsampled) 480x640 image:
# xmap[r, c] == r (row index) and ymap[r, c] == c (column index).
xmap, ymap = np.indices((int(upsample * 480), int(upsample * 640)))
#make pointcloud from depth image
def make_pointcloud_all_points(depth_image):
    """Back-project every pixel of a depth image into a camera-frame cloud.

    @param depth_image: [H x W] depth in meters
    @return: [H*W x 3] array of points in the (x, -y, -z) convention
    """
    cam_scale = 1.0
    cam_cx, cam_cy = 320.0, 240.0
    camera_params = {'fx':579.411255, 'fy':579.411255, 'img_width':640, 'img_height': 480}
    z = depth_image.flatten()[:, np.newaxis].astype(np.float32) / cam_scale
    cols = ymap.flatten()[:, np.newaxis].astype(np.float32)
    rows = xmap.flatten()[:, np.newaxis].astype(np.float32)
    x = (cols / upsample - cam_cx) * z / (camera_params['fx'])
    y = (rows / upsample - cam_cy) * z / (camera_params['fy'])
    return np.concatenate((x, -y, -z), axis=1)
def color_code_objects(frame, state_id_to_model_pixels, display=False):
    """Render an integer label image as an RGB colour image.

    Colour maps are cached on disk per label-count so colours stay stable
    across frames with the same number of objects.
    @param frame: [H x W] integer label image
    @param state_id_to_model_pixels: object collection used to size a new map
    @param display: if True, show the result in an OpenCV window
    @return: [H x W x 3] float colour image
    """
    #generate object color mapping
    labels = np.unique(frame)
    exec_dir = os.path.dirname(os.path.realpath(__file__))
    color_map_file_name = exec_dir+'/data/object_color_maps/object_color_map_size_'+str(labels.shape[0])+'.p'
    if os.path.isfile(color_map_file_name):
        object_color_map = pickle.load(open(color_map_file_name, "rb" ))
    else:
        # BUG FIX: this is a module-level function, so storing the map on a
        # non-existent ``self`` raised NameError (and the lookup below read an
        # undefined local) whenever the cache file was missing.
        object_color_map = glasbey.get_colors(len(state_id_to_model_pixels))
        pickle.dump(object_color_map, open(color_map_file_name, "wb" ))
    #create labelled image
    labelled_frame = np.zeros((frame.shape[0], frame.shape[1], 3))
    # Renamed from ``label`` to avoid shadowing scipy.ndimage's label import.
    for label_idx in range(labels.shape[0]):
        object_pixel_positions_exact = np.argwhere(frame == label_idx)
        object_pixel_positions_in_bounds = object_pixel_positions_exact.astype(int)
        if len(object_pixel_positions_in_bounds.shape) == 2 and object_pixel_positions_in_bounds.shape[0] > 0 and object_pixel_positions_in_bounds.shape[1] == 2:
            object_color = object_color_map[label_idx]
            labelled_frame[object_pixel_positions_in_bounds[:, 0], object_pixel_positions_in_bounds[:, 1]] = object_color
    if display:
        cv2.imshow('object labels', labelled_frame)
        cv2.waitKey(20)
    return labelled_frame
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
def get_bbox(bbx):
    """Clamp a [rmin, rmax, cmin, cmax] box to a 480x640 image and round each
    side length up to the next value in ``border_list``, keeping the centre
    fixed and shifting the box back inside the image if it spills over."""
    # Clamp the raw box to the image bounds (mutates bbx, as before).
    if bbx[0] < 0:
        bbx[0] = 0
    if bbx[1] >= 480:
        bbx[1] = 479
    if bbx[2] < 0:
        bbx[2] = 0
    if bbx[3] >= 640:
        bbx[3] = 639
    rmin, rmax, cmin, cmax = bbx[0], bbx[1], bbx[2], bbx[3]

    def round_up(size):
        # First border interval strictly containing the size; sizes that fall
        # exactly on a border value are left unchanged.
        for low, high in zip(border_list, border_list[1:]):
            if low < size < high:
                return high
        return size

    r_b = round_up(rmax - rmin)
    c_b = round_up(cmax - cmin)
    # Re-centre the rounded box.
    center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
    rmin = center[0] - int(r_b / 2)
    rmax = center[0] + int(r_b / 2)
    cmin = center[1] - int(c_b / 2)
    cmax = center[1] + int(c_b / 2)
    # Shift the box back inside the image where it overflows an edge.
    if rmin < 0:
        rmax -= rmin
        rmin = 0
    if cmin < 0:
        cmax -= cmin
        cmin = 0
    if rmax > 480:
        rmin -= rmax - 480
        rmax = 480
    if cmax > 640:
        cmin -= cmax - 640
        cmax = 640
    return rmin, rmax, cmin, cmax
#transform robot meshes into current position
def make_known_meshes(known_meshes, physics, geom_names):
    """Return copies of the known robot meshes posed at their current MuJoCo
    world transforms (rotation from geom_xmat, then translation from
    geom_xpos)."""
    posed_meshes = []
    for mesh, geom_name in zip(known_meshes, geom_names):
        posed = mesh.copy()
        rotation = np.eye(4)
        rotation[0:3, 0:3] = np.reshape(physics.named.data.geom_xmat[geom_name], (3, 3))
        posed.apply_transform(rotation)
        translation = np.eye(4)
        translation[0:3, 3] = physics.named.data.geom_xpos[geom_name]
        posed.apply_transform(translation)
        posed_meshes.append(posed)
    return posed_meshes
#select voxel points in cube around target object, also compute table surface height
def select_points_in_cube_voxelize_sphr_proj(self, all_points, i, grid_size=128, estimate_table=False, sub_vox=0, min_z=None, unocc=None):
    """Voxelize the points inside the unit cube and build a spherical projection.

    @param all_points: [N x 3] points, assumed pre-normalised so the target
        region lies in the unit cube [-0.5, 0.5]^3
    @param i: index used to build a unique temp-file name for mesh simplification
    @param grid_size: resolution of the working voxel grid
    @param estimate_table: if True, also estimate the table surface height and
        shift the occupancy down so the table sits at z=0
    @param sub_vox: number of z voxel layers (at 128 scale) to subtract
    @param min_z: upper z bound used for the table-height estimate
        (only read when estimate_table is True)
    @param unocc: optional occupancy grid of known-free space to subtract
    @return: (128^3 voxel grid, spherical projection, no_points flag) plus,
        when estimate_table is True, the table height in voxel units
    """
    # Keep only points strictly inside the unit cube.
    low=np.array([-0.5,-0.5,-0.5])
    hi=np.array([0.5,0.5,0.5])
    points=all_points[np.argwhere(np.all(np.logical_and(all_points>=low, all_points<=hi), axis=1))][:,0,:]
    voxels=np.zeros((grid_size,grid_size,grid_size))
    inds=np.floor((points + 0.5) * grid_size).astype(int)
    if sub_vox!=0:
        # sub_vox is expressed at 128 resolution; rescale to this grid.
        inds[:,2]=inds[:,2]-sub_vox/(128/grid_size)
        az_inds=np.argwhere(inds[:,2]>=0)
        inds=inds[az_inds[:,0]]
    inds=np.clip(inds, 0, grid_size-1)
    if inds.shape[0]==0:
        # Nothing inside the cube: return empty occupancy and projection.
        if estimate_table:
            return np.zeros((128,128,128)), np.zeros((160,160)), None, 0
        else:
            return np.zeros((128,128,128)), np.zeros((160,160)), None
    voxels[inds[:, 0], inds[:, 1], inds[:, 2]] = 1.0
    if unocc is not None:
        # Remove voxels known to be unoccupied.
        voxels=np.clip(voxels-unocc, 0, 1)
    if estimate_table:
        # Table height = modal z index of the points below min_z; shift the
        # object voxels down by it and insert an explicit table plane at z=0.
        more_points=all_points[np.argwhere(np.all(np.logical_and(all_points>=np.array([-3,-3,-1]), all_points<=np.array([2,2,min_z+0.01])), axis=1))][:,0,:]
        more_inds=np.floor((more_points + 0.5) * grid_size).astype(int)
        a=more_inds[:,2]
        max_inds=scipy.stats.mode(more_inds[:,2], axis=None)[0][0]
        inds[:,2]=inds[:,2]-max_inds
        az_inds=np.argwhere(inds[:,2]>=0)
        inds=inds[az_inds[:,0]]
        voxels=np.zeros((grid_size,grid_size,grid_size))
        voxels[:,:,0]=1
        voxels[inds[:, 0], inds[:, 1], inds[:, 2]] = 1.0
    no_points=False
    # Mesh the occupancy, then compute its spherical projection.
    verts, faces, normals, values = measure.marching_cubes_lewiner(
        voxels, spacing=(1 / grid_size, 1 / grid_size, 1 / grid_size))
    mesh = trimesh.Trimesh(vertices=verts - 0.5, faces=faces, vertex_normals=normals)
    trimesh.repair.fix_inversion(mesh)
    if verts.shape[0]>50000:
        # Very dense mesh: simplify via Open3D vertex clustering through a
        # temporary file in shared memory.
        mesh.export(f'/dev/shm/temp_mesh_conv_{i}.ply')
        o3d_mesh=o3d.io.read_triangle_mesh(f'/dev/shm/temp_mesh_conv_{i}.ply')
        o3d_mesh=o3d_mesh.simplify_vertex_clustering(0.05)
        mesh=trimesh.Trimesh(vertices=np.asarray(o3d_mesh.vertices), faces=np.asarray(o3d_mesh.triangles), face_normals=np.asarray(o3d_mesh.triangle_normals), process=False)
        os.remove(f'/dev/shm/temp_mesh_conv_{i}.ply')
    proj=util_sph.proj_spherical(mesh)
    if grid_size!=128:
        # Re-voxelize the original points at the fixed 128 output resolution.
        full_voxels=np.zeros((128,128,128))
        voxels=np.zeros((128,128,128))
        inds=np.floor((points + 0.5) * 128).astype(int)
        inds=np.clip(inds, 0, 128-1)
        if sub_vox!=0:
            inds[:,2]=inds[:,2]-sub_vox
            az_inds=np.argwhere(inds[:,2]>=0)
            inds=inds[az_inds[:,0]]
        full_voxels[inds[:, 0], inds[:, 1], inds[:, 2]] = 1.0
        voxels=full_voxels
    if estimate_table:
        return voxels, proj, no_points, max_inds
    else:
        return voxels, proj, no_points
class pose_model_estimator():
    """Estimates poses/shapes of unknown objects in a MuJoCo scene, combining
    segmentation and reconstruction workers with the known robot meshes."""
    def __init__(self, physics, seg_send, seg_receive, recon_send, recon_receive, project_dir, mj_scene_xml, save_id, use_cuda_vox, extrusion_baseline, custom_recon_net=False, max_known_body_id=70, voxels_per_meter=256, model=None, simulate_model_quality=False, model_quality=0, quality_type='', four_channel=False):#
        # Bookkeeping and IPC endpoints for the segmentation/reconstruction
        # worker processes.
        self.obs_cds=[]
        self.past_filled_voxels=None
        self.top_dir=project_dir
        self.scene_xml_file=mj_scene_xml
        self.save_id=save_id
        self.simulate_model_quality=simulate_model_quality
        self.model_quality=model_quality
        self.quality_type=quality_type
        self.four_channel=four_channel
        self.custom_recon_net=custom_recon_net
        self.seg_send=seg_send
        self.seg_receive=seg_receive
        self.recon_send=recon_send
        self.recon_receive=recon_receive
        #1: load and voxelize all known meshes
        mesh_list, self.mesh_name_to_file, self.name_to_scale_dict=get_mesh_list(mj_scene_xml)[:max_known_body_id]
        # NOTE(review): only the first 69 meshes are treated as known robot
        # geometry — presumably matches the geom_id<71 check below; confirm.
        mesh_list=mesh_list[:69]
        self.included_meshes=[]
        self.geom_names=[]
        self.pred_obj_meshes=[]
        self.use_cuda_vox=use_cuda_vox
        self.extrusion_baseline=extrusion_baseline
        self.upsample=1
        self.palm_mesh_verts=None
        # Pixel index grids: xmap[r, c] == r, ymap[r, c] == c.
        self.xmap = np.array([[j for i in range(int(self.upsample*640))] for j in range(int(self.upsample*480))])
        self.geom_ids=[]
        self.ymap = np.array([[i for i in range(int(self.upsample*640))] for j in range(int(self.upsample*480))])
        for geom_name in model.named.data.geom_xpos.axes.row.names:
            num_voxels=256
            geom_id=model.model.name2id(geom_name, "geom")
            self.geom_ids.append(geom_id)
            if geom_id<71:
                if model.model.geom_dataid[geom_id]>-1:
                    # Geom backed by a mesh asset: load it and undo MuJoCo's
                    # internal centering/rotation so it matches geom transforms.
                    mesh_name=model.model.id2name(model.model.geom_dataid[geom_id], "mesh")
                    mesh=trimesh.load_mesh(self.mesh_name_to_file[model.model.id2name(model.model.geom_dataid[geom_id], "mesh")])
                    mesh_off_trans, mesh_off_rot=compute_mujoco_int_transform(self.mesh_name_to_file[model.model.id2name(model.model.geom_dataid[geom_id], "mesh")], save_id)
                    if geom_name=="herb/wam_1/bhand//unnamed_geom_0":
                        # Special case: sample a sparse vertex set on the palm
                        # of the hand for later contact reasoning.
                        c_mesh=mesh.convex_hull
                        scale=2*np.amax(np.abs(c_mesh.bounds))
                        print('cmesh bounds', c_mesh.bounds)
                        scale_mat=np.eye(4)
                        scale_mat=scale_mat/scale
                        scale_mat[3,3]=1.0
                        s_palm_mesh_vertices=c_mesh.copy().apply_transform(scale_mat)
                        self.palm_mesh_verts=voxel.voxelize_model_binvox(s_palm_mesh_vertices, 32, self.save_id, binvox_add_param='-bb -.5 -.5 -.5 .5 .5 .5', use_cuda_vox=False)
                        a=np.argwhere(self.palm_mesh_verts)
                        # Keep only the outer shell, rescale back to meters and
                        # crop to the palm region.
                        self.palm_mesh_verts=(np.argwhere(hollow_dense_pointcloud(self.palm_mesh_verts))/32.0-0.5)*scale
                        self.palm_mesh_verts=self.palm_mesh_verts[np.argwhere(self.palm_mesh_verts[:,2]>0.075)][:,0,:]
                        self.palm_mesh_verts=self.palm_mesh_verts[np.argwhere(np.abs(self.palm_mesh_verts[:,1])<0.025)][:,0,:]
                        self.palm_mesh_verts=self.palm_mesh_verts[np.argwhere(np.abs(self.palm_mesh_verts[:,0])<0.02)][:,0,:]
                        self.palm_mesh_verts=np.matmul(mesh_off_rot.T, (self.palm_mesh_verts-mesh_off_trans).T).T
                    # Undo MuJoCo's internal mesh offset: translate, then
                    # rotate by the inverse of the internal rotation.
                    trans_mat=np.eye(4)
                    trans_mat[0:3, 3]=-mesh_off_trans
                    mesh.apply_transform(trans_mat)
                    trans_mat=np.eye(4)
                    trans_mat[0:3, 0:3]=mesh_off_rot.transpose()
                    mesh.apply_transform(trans_mat)
                    self.included_meshes.append(mesh)
                    self.geom_names.append(geom_name)
                elif geom_id<len(mesh_list) and mesh_list[geom_id] is not None:
                    # Primitive geom with a pre-built mesh from the scene XML.
                    mesh=mesh_list[geom_id]
                    self.included_meshes.append(mesh)
                    self.geom_names.append(geom_name)
        #2: transform voxel grids into scene, round into global voxel grid
        #3: transform predicted voxel into global grid, comput intersection with known voxels, return non-intersecting
        #4: marching cubes to convert voxels to mesh
        self.camera_params={'fx':579.4112549695428, 'fy':579.4112549695428, 'img_width':640, 'img_height': 480}
        self.past_poses=None
        self.past_voxels_scales_translations=None
        self.tracking_max_distance=0.25
        make_known_meshes(self.included_meshes, physics, self.geom_names)
    #remove intersections between predicted objects
    def subtract_mesh_hull_no_stability_loss(self, resolve_dense_pointcloud, meshes_sub, cam_mat, translation, cam_pos, scale, inv_cm):
        """Remove the portion of a predicted dense point cloud that intersects
        other meshes, then re-mesh and convex-decompose the remainder.

        @param resolve_dense_pointcloud: [N x 3] world-frame dense point cloud
        @param meshes_sub: meshes whose volume (plus a 5 cm margin around
            their vertices) must be carved out of the prediction
        @param cam_mat: camera rotation matrix
        @param translation: object translation in camera frame
        @param cam_pos: camera position in world frame
        @param scale: metric size of the 128^3 voxel cube
        @param inv_cm: unused here (kept for interface compatibility)
        @return: (list of convex decomposition pieces, combined mesh) or
            (None, None) when nothing of significance remains
        """
        world_translation=np.matmul(cam_mat, translation)
        # Re-voxelize the input cloud in the object-centred cube.
        dense_ptcld_cpy=resolve_dense_pointcloud-cam_pos-world_translation
        dense_ptcld_cpy=np.round((dense_ptcld_cpy/scale+0.5)*128.0).astype(int)
        voxels=np.zeros((128,128,128), dtype=int)
        voxels[dense_ptcld_cpy[:, 0],dense_ptcld_cpy[:, 1],dense_ptcld_cpy[:, 2]]=1
        verts, faces, normals, values = measure.marching_cubes_lewiner(
            voxels, mesh_level, spacing=(1 / 128, 1 / 128, 1 / 128))
        verts=(verts-0.5)*scale
        verts=verts+cam_pos+world_translation
        # Flag points lying inside any subtract mesh, or within 5 cm of its
        # vertices, as "inside" and drop them.
        outside_mesh=np.ones(resolve_dense_pointcloud.shape[0])
        for known_mesh in meshes_sub:
            outside_mesh=np.logical_and(outside_mesh, 1-known_mesh.ray.contains_points(resolve_dense_pointcloud).astype(int))
            pcd_tree = KDTree(known_mesh.vertices)
            gt_pred_nn_dists, gt_pred_nn_inds=pcd_tree.query(resolve_dense_pointcloud)
            outside_mesh=np.where(np.ndarray.flatten(gt_pred_nn_dists)<0.05, 0, outside_mesh)
        dense_ptcld=resolve_dense_pointcloud[np.argwhere(outside_mesh)[:, 0]]
        # Re-voxelize and re-mesh the surviving points.
        dense_ptcld=dense_ptcld-cam_pos-world_translation
        dense_ptcld=np.round((dense_ptcld/scale+0.5)*128.0).astype(int)
        voxels=np.zeros((128,128,128), dtype=int)
        voxels[dense_ptcld[:, 0],dense_ptcld[:, 1],dense_ptcld[:, 2]]=1
        if dense_ptcld.shape[0]>0:
            verts, faces, normals, values = measure.marching_cubes_lewiner(
                voxels, mesh_level, spacing=(1 / 128, 1 / 128, 1 / 128))
            verts=(verts-0.5)*scale
        else:
            verts=np.zeros([0,3])
            faces=np.zeros([0,3])
            normals=np.zeros([0,3])
        if verts.shape[0]>0:
            mesh = trimesh.Trimesh(vertices=verts, faces=faces, vertex_normals=normals)
            trimesh.repair.fix_inversion(mesh)
            mesh.visual.face_colors = [109, 95, 119, 255]
            # Ignore slivers: require a non-trivial enclosed volume.
            if mesh.faces.shape[0]>0 and mesh.mass>10e-6:
                combined_mesh=None
                # Convex decomposition for physics simulation; keep only
                # pieces with non-negligible mass.
                decomps=trimesh.decomposition.convex_decomposition(mesh, maxNumVerticesPerCH=1024, concavity=0.0025, resolution=500000)
                if not isinstance(decomps, list):
                    decomps=[decomps]
                new_decomps=[]
                num_vertices=0
                for decomp in decomps:
                    if decomp.mass>10e-9:
                        new_decomps.append(decomp)
                        num_vertices+=decomp.vertices.shape[0]
                        if combined_mesh==None:
                            combined_mesh=decomp
                        else:
                            combined_mesh+=decomp
                print('num_vertices', num_vertices)
                decomps=new_decomps
                return decomps, combined_mesh
            else:
                return None, None
        else:
            return None, None
def projection_baseline(self, pointcloud):
projected_pointcloud=np.copy(pointcloud)
occupied_z_inds=np.argwhere(np.any(pointcloud, axis=2))
projected_pointcloud[occupied_z_inds[:,0], occupied_z_inds[:,1], 0]=1
return projected_pointcloud
#remove predicted mesh itnersections with robot and table
def refine_mesh_no_stability_loss(self, cam_mat, translation, gt_mesh, cam_pos, scale, pred_voxels, inv_cm, known_meshes, refine=False):
world_translation=np.matmul(cam_mat, translation)
transform_mesh=gt_mesh.copy()
transform=np.eye(4)
transform[:3,3]=-cam_pos-world_translation
transform_mesh.apply_transform(transform)
inv_cam_mat=np.linalg.inv(cam_mat)
transform=np.eye(4)
transform[:3, :3]=inv_cam_mat
transform_mesh.apply_transform(transform)
scale_mat=np.eye(4)
scale_mat=scale_mat/scale
scale_mat[3,3]=1.0
transform_mesh.apply_transform(scale_mat)
ground_truth_voxels=voxel.voxelize_model_binvox(transform_mesh, 128, self.save_id, binvox_add_param='-bb -.5 -.5 -.5 .5 .5 .5', use_cuda_vox=self.use_cuda_vox)
mesh_losses={}
try:
gt_points=np.argwhere(ground_truth_voxels)
pred_voxels=pred_voxels[0]
if self.simulate_model_quality:
pred_voxels, self.cd=self.change_model_quality(pred_voxels, ground_truth_voxels, scale/128.0)
thres_pred_voxels=pred_voxels>=mesh_level
thres_pred_points=np.argwhere(thres_pred_voxels)
if refine:
pcd_tree = KDTree(gt_points)
pred_gt_nn_dists, pred_gt_nn_inds=pcd_tree.query(thres_pred_points)
pred_gt_nn_dists=(pred_gt_nn_dists/128.0)*scale
pred_gt_nn_inds=pred_gt_nn_inds[:,0]
pcd_tree = KDTree(thres_pred_points)
gt_pred_nn_dists, gt_pred_nn_inds=pcd_tree.query(gt_points)
gt_pred_nn_dists=(gt_pred_nn_dists/128.0)*scale
gt_pred_nn_inds=gt_pred_nn_inds[:,0]
pg_loss=np.sum(pred_gt_nn_dists)/thres_pred_points.shape[0]
gp_loss=np.sum(gt_pred_nn_dists)/gt_points.shape[0]
mesh_losses['chamfer']=pg_loss+gp_loss
except:
print('gt voxels projection error!')
traceback.print_exc()
thres_pred_points=np.argwhere(pred_voxels>=mesh_level)
dense_ptcld=(thres_pred_points/128.0-0.5)*scale
dense_ptcld=dense_ptcld+world_translation+cam_pos
outside_mesh=np.ones(dense_ptcld.shape[0])
for known_mesh in known_meshes:
outside_mesh=np.logical_and(outside_mesh, 1-known_mesh.ray.contains_points(dense_ptcld).astype(int))
dense_ptcld=dense_ptcld[np.argwhere(outside_mesh)[:, 0]]
dense_ptcld=dense_ptcld[np.argwhere(dense_ptcld[:,2]>=0.3)[:, 0]]
resolve_dense_ptcld=np.copy(dense_ptcld)
dense_ptcld=dense_ptcld-cam_pos-world_translation
dense_ptcld=np.round((dense_ptcld/scale+0.5)*128.0).astype(int)
voxels= | np.zeros((128,128,128), dtype=int) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@description: Module containing functions to calculate derivatives, averages, decompositions, vorticity.
@contact: <EMAIL>
"""
# Imports
import numpy as np
import warnings
# Functions
def avg_z(u):
    """
    Return the span-wise spatial average of a three-dimensional field.
    If the passed u is spanwise periodic use trapezoid()/(n-1). Otherwise mean().
    :param u: The field to be spanwise-averaged.
    :return: Return the span-wise spatial average of a three-dimensional field.
    """
    # scipy.integrate.trapz/simps were removed in modern SciPy; trapezoid is
    # the supported name. The stray debug print in the non-periodic branch
    # has also been removed.
    from scipy.integrate import trapezoid
    if not len(u.shape) == 3:
        warnings.warn("Field not 3D. Returning same array.")
        return u
    if np.array_equal(u[..., 0], u[..., -1]):  # Periodic on last axis
        # One full period: (n-1) intervals, so normalise by (n-1) not n.
        return trapezoid(u, axis=2) / (u.shape[2] - 1)
    return u.mean(axis=2)
def avg_z2(u):
    """
    Return the span-wise spatial average of a three-dimensional field using a
    high-order composite quadrature: unit weights on interior samples plus
    weighted four-point stencils at each end.
    :param u: The field to be spanwise-averaged ([m x n x l]).
    :return: [m x n] span-wise average.
    """
    # Fixes over the original: removed the per-row debug print, removed the
    # unused scipy import, stopped shadowing the builtin ``sum``, and replaced
    # the O(m*n*l) pure-Python triple loop with an equivalent vectorised sum.
    if not len(u.shape) == 3:
        warnings.warn("Field not 3D. Returning same array.")
        return u
    n_z = u.shape[2]
    # Interior samples k = 4 .. n_z-5 carry unit weight.
    interior = u[:, :, 4:n_z - 4].sum(axis=2)
    # End stencils use the (17, 59, 43, 49)/48 closed weights.
    boundary = (17 * u[:, :, 0] + 59 * u[:, :, 1] + 43 * u[:, :, 2] + 49 * u[:, :, 3]
                + 49 * u[:, :, -4] + 43 * u[:, :, -3] + 59 * u[:, :, -2] + 17 * u[:, :, -1]) / 48
    return (interior + boundary) / (n_z - 1)
def avg_z_1D(u):
    """
    Compare several 1-D averaging/quadrature schemes on the same profile.
    :param u: 1-D array sampled on a uniform unit-spaced grid.
    :return: tuple (mean, rectangular rule, trapezoid, Simpson, high-order
        composite, adaptive quadrature of a spline fit); all integral-based
        results are normalised by (N-1).
    """
    # Fixes over the original: removed the debug print of N, dropped the
    # unused Rbf import, and replaced the removed SciPy APIs (trapz -> trapezoid,
    # simps -> simpson, quadrature -> quad).
    from scipy.integrate import trapezoid, simpson, quad
    from scipy.interpolate import InterpolatedUnivariateSpline
    N = len(u)
    # Mean
    a1 = np.mean(u)
    # Rectangular rule (interval midpoints)
    a2 = 0
    for i in range(0, len(u) - 1):
        a2 = a2 + (u[i] + u[i + 1]) / 2
    a2 = a2 / (N - 1)
    # Trapezoidal rule
    a3 = trapezoid(u) / (u.size - 1)
    # Simpson's rule
    a4 = simpson(u) / (u.size - 1)
    # High-order composite scheme with weighted end stencils
    a5 = 0
    for i in range(4, len(u) - 4):
        a5 = a5 + u[i]
    a5 = a5 + 1 / 48 * (17 * u[0] + 59 * u[1] + 43 * u[2] + 49 * u[3]
                        + 49 * u[-4] + 43 * u[-3] + 59 * u[-2] + 17 * u[-1])
    a5 = a5 / (N - 1)
    # Adaptive quadrature of a spline interpolant of the samples
    f = InterpolatedUnivariateSpline(np.arange(N), u)
    a6, err = quad(f, 0, N - 1)
    a6 = a6 / (N - 1)
    return a1, a2, a3, a4, a5, a6
def make_periodicZ(u, **kwargs):  # Methods: add=True (append layer), add=False (overwrite last layer with the first)
    """Make a 3-D field periodic along the last axis.

    With ``add=True`` (default) a new layer equal to the first one is appended
    and a new array is returned; with ``add=False`` the last layer is
    overwritten in place with the first and the same array is returned.
    """
    if kwargs.get('add', True):
        extended = np.zeros((u.shape[0], u.shape[1], u.shape[2] + 1))
        extended[..., :-1] = u
        extended[..., -1] = u[..., 0]
        return extended
    u[..., -1] = u[..., 0]
    return u
def make_periodic(a, axis):
    """Append the first slice along ``axis`` to the end, closing the period."""
    first_slice = np.take(a, indices=0, axis=axis)
    return np.concatenate((a, np.expand_dims(first_slice, axis=axis)), axis=axis)
def decomp_z(u):
    """
    :param u: field to be decomposed in z direction.
    :return: the spanwise average and the fluctuation about it for a
        three-dimensional field.
    """
    if len(u.shape) != 3:
        raise ValueError("Fields must be three-dimensional")
    mean_part = avg_z(u)
    fluctuation = u - mean_part[:, :, None]
    return mean_part, fluctuation
def ddx(u, x=None, acc=1):
    """
    First derivative along the i (axis-0) direction.
    :param u: n-dimensional field (n <= 3).
    :param x: optional coordinate array; requires acc=2.
    :param acc: 1 for forward differences (zero-padded at i=0),
        2 for second-order central differences via np.gradient.
    :return: array of the same shape as u.
    """
    if x is not None:
        if acc != 2:
            raise ValueError('Only 2nd accuracy acc considered when x is included')
        return np.gradient(u, x, axis=0, edge_order=2)
    if acc == 2:
        return np.gradient(u, axis=0, edge_order=2)
    if acc != 1:
        raise ValueError('Only 1st or 2nd accuracy acc considered.')
    diffs = np.diff(u, n=1, axis=0)
    # Pad a zero slice at i=0 so the output keeps the input shape.
    if u.ndim == 1:
        return np.append(0, diffs)
    if u.ndim == 2:
        return np.concatenate((np.zeros(u.shape[1])[None, :], diffs), axis=0)
    if u.ndim == 3:
        return np.concatenate((np.zeros(u.shape[1:])[None, :, :], diffs), axis=0)
    raise ValueError('Only arrays with dimensions <=3 considered.')
def ddy(u, y=None, acc=1):
    """
    First derivative along the j (axis-1) direction.
    :param u: n-dimensional field (2 <= n <= 3).
    :param y: optional coordinate array; requires acc=2.
    :param acc: 1 for forward differences (zero-padded at j=0),
        2 for second-order central differences via np.gradient.
    :return: array of the same shape as u.
    """
    if y is not None:
        if acc != 2:
            raise ValueError('Only 2nd accuracy acc considered when y is included')
        return np.gradient(u, y, axis=1, edge_order=2)
    if acc == 2:
        return np.gradient(u, axis=1, edge_order=2)
    if acc != 1:
        raise ValueError('Only 1st or 2nd accuracy acc considered.')
    diffs = np.diff(u, n=1, axis=1)
    # Pad a zero slice at j=0 so the output keeps the input shape.
    if u.ndim == 2:
        return np.concatenate((np.zeros(u.shape[0])[:, None], diffs), axis=1)
    if u.ndim == 3:
        return np.concatenate((np.zeros((u.shape[0], u.shape[2]))[:, None, :], diffs), axis=1)
    raise ValueError('Only arrays with dimensions >=2 considered.')
def ddz(u, z=None, acc=1):
"""
:param u: n-dimensional field.
:return: the first-acc derivative in the j direction of (n>=2 dimensional) field.
"""
if not np.array_equal(u[..., 0], u[..., -1]): # Field is not periodic
if z is not None:
if acc == 2:
return np.gradient(u, z, axis=2, edge_order=2)
else:
raise ValueError('Only 2nd accuracy acc considered when z is included')
else:
if acc == 2:
return np.gradient(u, axis=2, edge_order=2)
elif acc == 1:
a = np.diff(u, n=1, axis=2)
if u.ndim == 3:
return np.concatenate((np.zeros((u.shape[0], u.shape[1]))[:, :, None], a), axis=2) # Return same shape with zeros in k=0
else:
raise ValueError('Only arrays with dimensions =3 considered.')
else:
raise ValueError('Only 1st or 2nd accuracy order considered.')
else: # Field is periodic
if z is not None:
if acc == 2:
return np.gradient(u, z, axis=2, edge_order=2)
else:
raise ValueError('Only 2nd accuracy acc considered when z is included')
else:
u_temp = np.zeros((u.shape[0], u.shape[1], u.shape[2] + 2))
u_temp[:, :, 1:-1], u_temp[:, :, 0], u_temp[:, :, -1] = u, u[:, :, -2], u[:, :, 1]
del u
if z is not None:
if acc == 2:
dudz = | np.gradient(u_temp, z, axis=2, edge_order=2) | numpy.gradient |
"""Script for sampling COV, burstiness and memory coeficient, and
their uncertainties, on many faults and plotting them
<NAME>
University of Otago
2020
"""
import os, sys
import ast
from glob import glob
from operator import itemgetter
from re import finditer
import numpy as np
from scipy.optimize import curve_fit
from scipy.odr import Model, RealData, ODR
import scipy.odr.odrpack as odrpack
from scipy.stats import expon, gamma, weibull_min, ks_2samp, kstest
# !!! Dangerous hack to swap Weibull for gamma
#from scipy.stats import weibull_min as gamma #
# !!!
from matplotlib import pyplot
from matplotlib.patches import PathPatch
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter
from scipy.stats import binom, kde
from adjustText import adjust_text
from QuakeRates.dataman.event_dates import EventSet
from QuakeRates.dataman.parse_oxcal import parse_oxcal
from QuakeRates.dataman.parse_age_sigma import parse_age_sigma
from QuakeRates.dataman.parse_params import parse_param_file, \
get_event_sets, file_len
from QuakeRates.utilities.bilinear import bilinear_reg_zero_slope, \
bilinear_reg_fix, bilinear_reg_fix_zero_slope
from QuakeRates.utilities.memory_coefficient import burstiness, memory_coefficient
# Input parameter files: by default every record in ../params is analysed.
filepath = '../params'
param_file_list = glob(os.path.join(filepath, '*.txt'))
# New Zealand fault records (used when restricting to the NZ subset below).
param_file_list_NZ = ['Akatore_TaylorSilva_2019.txt',
                      'AlpineHokuriCk_Berryman_2012_simple.txt',
                      'AlpineSouthWestland_Cochran_2017_simple.txt',
                      'AwatereEast_Nicol_2016_simple.txt',
                      'ClarenceEast_Nicol_2016_simple.txt',
                      'CloudyFault_Nicol_2016_simple.txt',
                      'Dunstan_GNS_unpub_simple.txt',
                      'HopeConway_Hatem_2019_simple.txt',
                      'Hope_Khajavi_2016_simple.txt',
                      'Ihaia_Nicol_2016_simple.txt',
                      'Oaonui_Nicol_2016_simple.txt',
                      'Ohariu_Nicol_2016_simple.txt',
                      'Paeroa_Nicol_2016_simple.txt',
                      'Pihama_Nicol_2016_simple.txt',
                      'PortersPassEast_Nicol_2016_simple.txt',
                      'Ngakuru_Nicol_2016_simple.txt',
                      'Mangatete_Nicol_2016_simple.txt',
                      'Rangipo_Nicol_2016_simple.txt',
                      'Rotoitipakau_Nicol_2016_simple.txt',
                      'Rotohauhau_Nicol_2016_simple.txt',
                      'Snowden_Nicol_2016_simple.txt',
                      'Vernon_Nicol_2016_simple.txt',
                      'WairarapaSouth_Nicol_2016_simple.txt',
                      'Wairau_Nicol_2018_simple.txt',
                      'Waimana_Nicol_2016_simple.txt',
                      'Wellington_Langridge_2011_simple.txt',
                      'Waitangi_GNS_unpub_simple.txt',
                      'Whakatane_Nicol_2016_simple.txt',
                      'Whirinaki_Nicol_2016_simple.txt']
# List of faults in study by Williams et al 2019
# Note this is not entirely the same, as there are some records from
# that study that are not included in ours.
param_file_list_W = ['AlpineHokuriCk_Berryman_2012_simple.txt',
                     'HaywardTysons_Lienkaemper_2007_simple.txt',
                     'SanJacintoMysticLake_Onderdonk_2018_simple.txt',
                     'NorthAnatolianElmacik_Fraser_2010_simple.txt',
                     'SanAndreasWrightwood_Weldon_2004_simple.txt',
                     'SanAndreasCarizzo_Akciz_2010_simple.txt',
                     'SanJacintoHogLake_Rockwell_2015_simple.txt',
                     'SanAndreasMissionCk_Fumal_2002_simple.txt',
                     'SanAndreasPalletCk_Scharer_2011_simple.txt',
                     'Xorkoli_Altyn_Tagh_Yuan_2018.txt',
                     'NorthAnatolianYaylabeli_Kozaci_2011_simple.txt',
                     'ElsinoreTemecula_Vaughan_1999_simple.txt',
                     'DeadSeaJordan_Ferry_2011_simple.txt',
                     'SanAndreasBigBend_Scharer_2017_simple.txt',
                     'WasatchBrigham_McCalpin_1996_simple.txt',
                     'Irpinia_Pantosti_1993_simple.txt',
                     'WasatchWeber_Duross_2011_simple.txt',
                     'WasatchNilphi_Duross_2017_simple.txt',
                     'LomaBlanca_Williams_2017_simple.txt',
                     'AlaskaPWSCopper_Plafker_1994_simple.txt',
                     'NankaiTrough_Hori_2004_simple.txt',
                     'CascadiaNth_Adams_1994_simple.txt',
                     'CascadiaSth_Goldfinger_2003_simple.txt',
                     'JavonCanyon_SarnaWojicki_1987_simple.txt',
                     'NewGuinea_Ota_1996_simple.txt',
                     'ChileMargin_Moernaut_2018_simple.txt']
# Uncomment to restrict the analysis to one of the subsets above.
#param_file_list = []
#for f in param_file_list_NZ:
#for f in param_file_list_W:
#    param_file_list.append(os.path.join(filepath, f))
# Monte Carlo and plotting configuration for the fault-chronology analysis.
n_samples = 10000  # Number of Monte Carlo samples of the eq chronologies
half_n = int(n_samples/2)
print(half_n)
annotate_plots = False # If True, lable each fault on the plot
plot_folder = './plots'
if not os.path.exists(plot_folder):
    os.makedirs(plot_folder)
# Define subset to take
#faulting_styles = ['Reverse']
#faulting_styles = ['Normal']
#faulting_styles = ['Strike_slip']
faulting_styles = ['all']
tectonic_regions = ['all']
#tectonic_regions = ['Intraplate_noncratonic', 'Intraplate_cratonic', 'Near_plate_boundary']
#tectonic_regions = ['Plate_boundary_master', 'Plate_boundary_network']
#tectonic_regions = ['Plate_boundary_network', 'Near_plate_boundary']
#tectonic_regions = ['Plate_boundary_master']
#tectonic_regions = ['Subduction']
#tectonic_regions = ['Near_plate_boundary']
min_number_events = 5 # Use for all other calculations.
min_num_events_mem = 6 # Use for memory coefficient
#Summarise for comment to add to figure filename
fig_comment = ''
#fig_comment = 'NZ_examples_'
#fig_comment = 'Williams2019_'
for f in faulting_styles:
    fig_comment += f
    fig_comment += '_'
for t in tectonic_regions:
    fig_comment += t
    fig_comment += '_'
fig_comment += str(min_number_events)
#fig_comment += 'test_add_event_data'
def piecewise_linear(x, x0, y0, k1, k2):
    """Continuous two-segment line through (x0, y0): slope k1 left of the
    breakpoint, slope k2 at and right of it."""
    return np.where(x < x0, y0 + k1 * (x - x0), y0 + k2 * (x - x0))
def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words.

    Runs of capitals are kept together (e.g. 'ABCDef' -> ['ABC', 'Def']).
    """
    pattern = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    return [match.group(0) for match in finditer(pattern, identifier)]
# --- Per-fault accumulators filled by the main loop below --------------------
plot_colours = []
all_ie_times = []
added_events = [] # Store names of records where we've added an event due to
                  # exceptionally long current open interval
covs = []
cov_bounds = []
burstinesses = []
burstiness_bounds = []
burstiness_stds = []
burstinesses_expon = []  # Reference burstiness from matched exponential samples
burstinesses_gamma = []  # Reference burstiness from matched gamma samples
ie_gamma_alpha = []
memory_coefficients = []
memory_bounds = []
memory_stds = []
memory_spearman_coefficients = []
memory_spearman_bounds = []
memory_spearman_lag2_coef = []
memory_spearman_lag2_bounds = []
long_term_rates = []
long_term_rate_stds = []
slip_rates = []
slip_rate_stds = []
slip_rate_bounds = []
max_interevent_times = []
min_interevent_times = []
min_paired_interevent_times = []
std_min_paired_interevent_times = []
std_min_interevent_times = []
std_max_interevent_times = []
max_interevent_times_bounds = []
min_interevent_times_bounds = []
min_paired_interevent_times_bounds = []
ratio_min_pair_max = []
ratio_min_max = []
std_ratio_min_pair_max = []
std_ratio_min_max = []
ratio_min_pair_max_bounds =[]
ratio_min_max_bounds = []
# Load the selected earthquake records (filtered by style/region/event count)
names, event_sets, event_certainties, num_events, tect_regions, fault_styles = \
        get_event_sets(param_file_list, tectonic_regions,
                       faulting_styles, min_number_events)
references = []
# Get citations for each dataset from filename
# NOTE(review): assumes filenames follow Name_Author_Year_simple.txt and that
# sp[0].split('/')[2] lands on the record name -- verify for other path depths.
for s in param_file_list:
    sp = s.split('_')
    if sp[0].split('/')[2] in names:
        references.append(sp[1] + ' ' + sp[2])
n_faults = len(names)
print('Number of faults', n_faults)
# For each fault record: Monte Carlo sample chronologies, compute dispersion
# (COV), burstiness and memory statistics, and build matched exponential/gamma
# reference distributions used later for significance testing.
for i, event_set in enumerate(event_sets):
    # Handle cases with uncertain number of events. Where events identification is
    # unsure, event_certainty is given a value of 0, compared with 1 for certain
    # events
    # First generate chronologies assuming all events are certain
    # event_set.name = names[i]
    event_set.gen_chronologies(n_samples, observation_end=2020, min_separation=1)
    event_set.calculate_cov()
    event_set.cov_density()
    event_set.memory_coefficient()
    event_set.memory_spearman_rank_correlation()
    # Store all inter-event times for global statistics
    all_ie_times.append(event_set.interevent_times)
    # Now calculate some statistics on the sampled chronologies
    event_set.basic_chronology_stats()
    # Plot histogram of interevent times
    figfile = os.path.join(plot_folder, ('interevent_times_%s.png' % names[i]))
    event_set.plot_interevent_time_hist(fig_filename=figfile)
    # Fit gamma distribution to event set data
    event_set.fit_gamma()
    ie_gamma_alpha.append(event_set.mean_gamma_alpha_all) # Get mean estimate of alpha
    min_paired_interevent_times.append(event_set.mean_minimum_pair_interevent_time)
    max_interevent_times.append(event_set.mean_maximum_interevent_time)
    min_interevent_times.append(event_set.mean_minimum_interevent_time)
    std_min_paired_interevent_times.append(event_set.std_minimum_pair_interevent_time)
    std_min_interevent_times.append(event_set.std_minimum_interevent_time)
    std_max_interevent_times.append(event_set.std_maximum_interevent_time)
    if event_set.std_maximum_interevent_time == 0:
        print('Zero std_maximum_interevent_time for ', names[i])
    slip_rates.append(event_set.slip_rates[0])
    slip_rate_bounds.append([event_set.slip_rates[1], event_set.slip_rates[2]])
    slip_rate_stds.append(abs(np.log10(event_set.slip_rates[2]) - \
                              np.log10(event_set.slip_rates[1]))/4) # Approx from 95% intervals
    max_interevent_times_bounds.append([abs(event_set.mean_maximum_interevent_time -
                                            event_set.maximum_interevent_time_lb),
                                        abs(event_set.mean_maximum_interevent_time -
                                            event_set.maximum_interevent_time_ub)])
    min_interevent_times_bounds.append([abs(event_set.mean_minimum_interevent_time -
                                            event_set.minimum_interevent_time_lb),
                                        abs(event_set.mean_minimum_interevent_time -
                                            event_set.minimum_interevent_time_ub)])
    min_paired_interevent_times_bounds.append([abs(event_set.mean_minimum_pair_interevent_time -
                                                   event_set.minimum_pair_interevent_time_lb),
                                               abs(event_set.mean_minimum_pair_interevent_time -
                                                   event_set.minimum_pair_interevent_time_ub)])
    ratio_min_pair_max.append(event_set.mean_ratio_min_pair_max)
    ratio_min_max.append(event_set.mean_ratio_min_max)
    std_ratio_min_pair_max.append(event_set.std_ratio_min_pair_max)
    std_ratio_min_max.append(event_set.std_ratio_min_max)
    ratio_min_pair_max_bounds.append([abs(event_set.mean_ratio_min_pair_max -
                                          event_set.ratio_min_pair_max_lb),
                                      abs(event_set.mean_ratio_min_pair_max -
                                          event_set.ratio_min_pair_max_ub)])
    ratio_min_max_bounds.append([abs(event_set.mean_ratio_min_max -
                                     event_set.ratio_min_max_lb),
                                 abs(event_set.mean_ratio_min_max -
                                     event_set.ratio_min_max_ub)])
    # Generate random exponentially and gamma distributed samples of length num_events - 1
    # i.e. the number of inter-event times in the chronology. These will be used
    # later for testing
    scale = 100 # Fix scale, as burstiness is independent of scale for exponential distribution
    ie_times_expon = expon(scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
    ie_times_expon = np.reshape(np.array(ie_times_expon), (n_samples, (event_set.num_events-1)))
    ie_times_expon_T = ie_times_expon.T
    burst_expon = burstiness(ie_times_expon_T)
    # Gamma
    alpha_g = 2.3 #2.2 #1.6 ##2.35 #2.4 #2.0
    ie_times_g = gamma(alpha_g, scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
    ie_times_g = np.reshape(np.array(ie_times_g), (n_samples, (event_set.num_events-1)))
    ie_times_g_T = ie_times_g.T
    burst_g = burstiness(ie_times_g_T)
    # Now generate chronologies assuming uncertain events did not occur
    if sum(event_certainties[i]) < event_set.num_events:
        indices = np.where(event_certainties[i] == 1)
        indices = list(indices[0])
        # print(indices[0], type(indices))
        events_subset = list(itemgetter(*indices)(event_set.event_list))
        event_set_certain = EventSet(events_subset)
        event_set_certain.name = names[i]
        # FIX: use the same observation end (2020) as the all-events run above
        # so both halves of the combined sample share a common open interval
        # (was 2019 -- presumed typo; confirm against original analysis).
        event_set_certain.gen_chronologies(n_samples, observation_end=2020, min_separation=1)
        event_set_certain.calculate_cov()
        event_set_certain.cov_density()
        event_set_certain.basic_chronology_stats()
        event_set_certain.memory_coefficient()
        event_set_certain.memory_spearman_rank_correlation()
        # Generate random exponentially distributed samples of length num_events - 1
        # i.e. the number of inter-event times in the chronology. These will be used
        # later for testing
        ie_times_expon_certain = expon(scale=scale).rvs(size=(n_samples*(len(indices)-1)))
        ie_times_expon_certain = np.reshape(np.array(ie_times_expon_certain), (n_samples, (len(indices)-1)))
        ie_times_expon_certain_T = ie_times_expon_certain.T
        burst_expon_certain = burstiness(ie_times_expon_certain_T)
        # FIX: size the gamma reference samples by the certain-event subset
        # (len(indices)), consistent with the exponential samples just above
        # (was event_set.num_events).
        ie_times_g_certain = gamma(alpha_g, scale=scale).rvs(size=(n_samples*(len(indices)-1)))
        ie_times_g_certain = np.reshape(np.array(ie_times_g_certain), (n_samples, (len(indices)-1)))
        ie_times_g_certain_T = ie_times_g_certain.T
        # FIX: burstiness was computed from ie_times_g_T (the all-events gamma
        # samples); use the certain-subset samples generated just above.
        burst_g_certain = burstiness(ie_times_g_certain_T)
        # Now combine results from certain chronologies with uncertain ones,
        # taking n/2 samples from each set
        combined_covs = np.concatenate([event_set.covs[:half_n],
                                        event_set_certain.covs[:half_n]])
        combined_burstiness = np.concatenate([event_set.burstiness[:half_n],
                                              event_set_certain.burstiness[:half_n]])
        combined_memory = np.concatenate([event_set.mem_coef[:half_n],
                                          event_set_certain.mem_coef[:half_n]])
        combined_memory_spearman = np.concatenate([event_set.rhos[:half_n],
                                                   event_set_certain.rhos[:half_n]])
        combined_memory_spearman_lag2 = np.concatenate([event_set.rhos2[:half_n],
                                                        event_set_certain.rhos2[:half_n]])
        combined_burst_expon = np.concatenate([burst_expon[:half_n],
                                               burst_expon_certain[:half_n]])
        combined_burst_g = np.concatenate([burst_g[:half_n],
                                           burst_g_certain[:half_n]])
        covs.append(combined_covs)
        burstinesses.append(combined_burstiness)
        memory_coefficients.append(combined_memory)
        memory_stds.append(np.std(np.array(combined_memory)))
        memory_spearman_coefficients.append(combined_memory_spearman)
        memory_spearman_lag2_coef.append(combined_memory_spearman_lag2)
        burstinesses_expon.append(combined_burst_expon)
        burstinesses_gamma.append(combined_burst_g)
        # Bounds span the extremes of the two runs (all events vs certain only)
        cov_bounds.append([abs(np.mean(combined_covs) - \
                               min(event_set.cov_lb, event_set_certain.cov_lb)),
                           abs(np.mean(combined_covs) - \
                               max(event_set.cov_ub, event_set_certain.cov_ub))])
        burstiness_bounds.append([abs(np.mean(combined_burstiness) - \
                                      min(event_set.burstiness_lb,
                                          event_set_certain.burstiness_lb)),
                                  abs(np.mean(combined_burstiness) - \
                                      max(event_set.burstiness_ub,
                                          event_set_certain.burstiness_ub))])
        memory_bounds.append([abs(np.mean(combined_memory) - \
                                  min(event_set.memory_lb,
                                      event_set_certain.memory_lb)),
                              abs(np.mean(combined_memory) - \
                                  max(event_set.memory_ub,
                                      event_set_certain.memory_ub))])
        memory_spearman_bounds.append([abs(np.mean(combined_memory_spearman) - \
                                           min(event_set.rho_lb,
                                               event_set_certain.rho_lb)),
                                       abs(np.mean(combined_memory_spearman) - \
                                           max(event_set.rho_ub,
                                               event_set_certain.rho_ub))])
        memory_spearman_lag2_bounds.append([abs(np.mean(combined_memory_spearman_lag2) - \
                                                min(event_set.rho2_lb,
                                                    event_set_certain.rho2_lb)),
                                            abs(np.mean(combined_memory_spearman_lag2) - \
                                                max(event_set.rho2_ub,
                                                    event_set_certain.rho2_ub))])
        # Combine, taking n/2 samples from each set
        combined_ltrs = np.concatenate([event_set.long_term_rates[:half_n],
                                        event_set_certain.long_term_rates[:half_n]])
        burstiness_stds.append(np.std(combined_burstiness))
        print(len(combined_ltrs))
        long_term_rates.append(combined_ltrs)
        long_term_rate_stds.append(np.std(combined_ltrs))
    else:
        # All events certain: use the single chronology run directly
        covs.append(event_set.covs)
        burstinesses.append(event_set.burstiness)
        memory_coefficients.append(event_set.mem_coef)
        memory_stds.append(np.std(np.array(event_set.mem_coef)))
        memory_spearman_coefficients.append(event_set.rhos)
        memory_spearman_lag2_coef.append(event_set.rhos2)
        long_term_rates.append(event_set.long_term_rates)
        burstinesses_expon.append(burst_expon)
        burstinesses_gamma.append(burst_g)
        cov_bounds.append([abs(event_set.mean_cov - event_set.cov_lb),
                           abs(event_set.mean_cov - event_set.cov_ub)])
        burstiness_bounds.append([abs(event_set.mean_burstiness - event_set.burstiness_lb),
                                  abs(event_set.mean_burstiness - event_set.burstiness_ub)])
        memory_bounds.append([abs(event_set.mean_mem_coef - event_set.memory_lb),
                              abs(event_set.mean_mem_coef - event_set.memory_ub)])
        memory_spearman_bounds.append([abs(event_set.mean_rho - event_set.rho_lb),
                                       abs(event_set.mean_rho - event_set.rho_ub)])
        memory_spearman_lag2_bounds.append([abs(event_set.mean_rho2 - event_set.rho2_lb),
                                            abs(event_set.mean_rho2 - event_set.rho2_ub)])
        burstiness_stds.append(event_set.std_burstiness)
        # FIX: was np.mean(long_term_rates), i.e. a mean over *all* faults
        # accumulated so far; store the std of this fault's sampled long-term
        # rates, matching long_term_rate_stds in the combined branch above.
        long_term_rate_stds.append(np.std(event_set.long_term_rates))
    # Get colours for plotting later (by faulting style)
    if event_set.faulting_style == 'Normal':
        plot_colours.append('r')
    elif event_set.faulting_style == 'Reverse':
        plot_colours.append('b')
    elif event_set.faulting_style == 'Strike_slip':
        plot_colours.append('g')
    else:
        plot_colours.append('k')
    if event_set.add_events: # List of records where we model long open interval
        added_events.append(event_set.name)
# Convert to numpy arrays and transpose where necessary
# NOTE(review): all_ie_times / long_term_rates hold per-fault sequences that may
# differ in length, so np.array may produce an object array -- confirm intended.
num_events = np.array(num_events)
all_ie_times = np.array(all_ie_times)
max_interevent_times = np.array(max_interevent_times)
min_interevent_times = np.array(min_interevent_times)
min_paired_interevent_times = np.array(min_paired_interevent_times)
std_max_interevent_times = np.array(std_max_interevent_times)
std_min_interevent_times = np.array(std_min_interevent_times)
std_min_paired_interevent_times = np.array(std_min_paired_interevent_times)
max_interevent_times_bounds = np.array(max_interevent_times_bounds).T
min_interevent_times_bounds = np.array(min_interevent_times_bounds).T
min_paired_interevent_times_bounds = np.array(min_paired_interevent_times_bounds).T
long_term_rates_T = np.array(long_term_rates).T
mean_ltr = np.mean(long_term_rates_T, axis = 0)  # Mean long-term rate per fault
long_term_rate_stds = np.array(long_term_rate_stds)
slip_rates = np.array(slip_rates).T
slip_rate_bounds = np.array(slip_rate_bounds).T
slip_rate_stds = np.array(slip_rate_stds).T
print('Mean_ltr', mean_ltr)
std_ltr = np.std(long_term_rates_T, axis = 0)
# 95% bounds on the long-term rate, expressed as offsets from the mean
ltr_bounds = np.array([abs(mean_ltr - (np.percentile(long_term_rates_T, 2.5, axis=0))),
                       abs(mean_ltr - (np.percentile(long_term_rates_T, 97.5, axis=0)))])
ratio_min_pair_max = np.array(ratio_min_pair_max)
ratio_min_max = np.array(ratio_min_max)
std_ratio_min_pair_max = np.array(std_ratio_min_pair_max)
std_ratio_min_max = np.array(std_ratio_min_max)
ratio_min_pair_max_bounds = np.array(ratio_min_pair_max_bounds).T
ratio_min_max_bounds = np.array(ratio_min_max_bounds).T
cov_bounds = np.array(cov_bounds).T
burstiness_bounds = np.array(burstiness_bounds).T
burstiness_stds = np.array(burstiness_stds)
burstiness_expon = np.array(burstinesses_expon)
burstiness_gamma = np.array(burstinesses_gamma)
inds = np.where(num_events >= min_num_events_mem) # Get memory coefficients for at least min_num_events_mem events
memory_coefficients = np.array(memory_coefficients)
memory_coefficients_min = memory_coefficients[inds]
memory_stds = np.array(memory_stds)
memory_stds_min = memory_stds[inds]
memory_bounds_min = np.array(memory_bounds)[inds].T
memory_bounds = np.array(memory_bounds).T
memory_spearman_bounds = np.array(memory_spearman_bounds).T
memory_spearman_lag2_bounds = np.array(memory_spearman_lag2_bounds).T
ie_gamma_alpha = np.array(ie_gamma_alpha)
# Now plot the means and 95% error bars of COV against long-term rate
pyplot.clf()
ax = pyplot.subplot(111)
mean_covs = []
for i, cov_set in enumerate(covs):
    mean_cov = np.mean(cov_set)
    mean_covs.append(mean_cov)
# Threshold colours: blue = quasi-periodic, green = Poisson-like, red = clustered
# NOTE(review): 'colours' is computed here but the scatter below uses
# plot_colours (faulting style); 'colours' is only consumed by later plots.
colours = []
for mean_cov in mean_covs:
    if mean_cov <= 0.9:
        colours.append('b')
    elif mean_cov > 0.9 and mean_cov <= 1.1:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_ltr, mean_covs,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, mean_covs,
                yerr = cov_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, mean_covs, marker = 's', c=plot_colours,
               s=25, zorder=2)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], mean_covs[i]),
                    fontsize=8)
ax.set_ylim([0, 2.5])
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('COV')
figname = 'mean_cov_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
################################
# Plot burstiness against mean ltr
pyplot.clf()
ax = pyplot.subplot(111)
mean_bs = []
for i, b_set in enumerate(burstinesses):
    mean_b = np.mean(b_set)
    mean_bs.append(mean_b)
# Threshold colours: blue = quasi-periodic (B<=-0.05), green = Poisson-like,
# red = bursty; scatter itself uses plot_colours (faulting style)
colours = []
for mean_b in mean_bs:
    if mean_b <= -0.05:
        colours.append('b')
    elif mean_b > -0.05 and mean_b <= 0.05:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_ltr, mean_bs,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, mean_bs,
                yerr = burstiness_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, mean_bs, marker = 's', c=plot_colours,
               s=25, zorder=2)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], mean_bs[i]),
                    fontsize=8)
# Add B=0 linear
pyplot.plot([1./1000000, 1./40], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('B')
# Now do a bi-linear fit to the data, splitting at 3e-4 events/yr
mean_bs = np.array(mean_bs)
indices = np.flatnonzero(mean_ltr > 3e-4)
indices = indices.flatten()
indices_slow_faults = np.flatnonzero(mean_ltr <= 3e-4)
indices_slow_faults = indices_slow_faults.flatten()
# Fit fast rate faults
lf = np.polyfit(np.log10(mean_ltr[indices]),
                mean_bs[indices], 1)
# Now force to be a flat line: replace the fitted slope with 0 and the
# intercept with the mean B of the fast faults
lf[0] = 0.
lf[1] = np.mean(mean_bs[indices])
std_lf = np.std(mean_bs[indices])
xvals_short = np.arange(1.5e-4, 2e-2, 1e-4)
yvals = lf[0]*np.log10(xvals_short) + lf[1]
pyplot.plot(xvals_short, yvals, c='0.2')
# Fit slow faults (log-linear fit of B against long-term rate)
# FIX: the guard was `len(indices_slow_faults > 1)`, i.e. the length of a
# boolean comparison array, which is truthy whenever *any* slow fault exists.
# A degree-1 polyfit needs at least two points, hence the corrected test.
if len(indices_slow_faults) > 1:
    lf_slow = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
                         mean_bs[indices_slow_faults], 1)
    xvals_short = np.arange(1e-6, 1.5e-4, 1e-6)
    yvals = lf_slow[0]*np.log10(xvals_short) + lf_slow[1]
    pyplot.plot(xvals_short, yvals, c='0.2')
# Add formula for linear fits of data
print('Fits for B vs LTR')
txt = 'Y = {:=+6.2f} +/- {:4.2f}'.format(lf[1], std_lf)
print(txt)
ax.annotate(txt, (2e-4, 0.2), fontsize=8)
try:
    txt = 'Y = {:4.2f}Log(x) {:=+6.2f}'.format(lf_slow[0], lf_slow[1])
    print(txt)
    ax.annotate(txt, (1.5e-6, 0.75), fontsize=8)
except NameError:
    # lf_slow is undefined when there were too few slow faults to fit
    pass
# Now try bilinear ODR linear fit: flat above a fitted hinge, sloping below
# NOTE(review): sx is np.log10 of the rate std devs, not the std dev of the
# log rates -- confirm this is the intended error model.
data = odrpack.RealData(np.log10(mean_ltr), mean_bs,
                        sx=np.log10(long_term_rate_stds), sy=burstiness_stds)
bilin = odrpack.Model(bilinear_reg_zero_slope)
odr = odrpack.ODR(data, bilin, beta0=[-3, -1.0, -4]) # array are starting values
odr.set_job(fit_type=0)  # 0 = explicit ODR
out = odr.run()
print(out.sum_square)
out.pprint()
a = out.beta[0]   # slope of the sloping segment
b = out.beta[1]   # intercept
hx = out.beta[2]  # hinge location (log10 of rate)
xvals = np.arange(1.e-6, 2e-2, 1e-6)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
ylevel = a*hx + b #10**(b + a * hx)
print('ylevel', ylevel)
print(10**ylevel)
idx = xvals > 10**hx
yrng[idx] = (ylevel)  # flatten the curve above the hinge
print('yrng', yrng)
print('hx', hx)
pyplot.plot(xvals, yrng, c='g')
# Bilinear fixed hinge: hinge fixed at 2e-4 events/yr
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
print('bilinear hxfix_cons_slope')
print(out.sum_square)
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='r')
figname = 'burstiness_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
#########################
# Plot burstiness against slip rate
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(slip_rates, mean_bs,
                xerr = slip_rate_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(slip_rates, mean_bs,
                yerr = burstiness_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(slip_rates, mean_bs, marker = 's', c=plot_colours,
               s=25, zorder=2)
ax.set_ylim([-1, 1])
ax.set_xlim([1./1000, 100])
# Add B=0 linear
pyplot.plot([1./1000, 100], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Slip rate (mm/yr)')
ax.set_ylabel('B')
def f(B, x):
    """Straight-line model for scipy ODR: y = B[0]*x + B[1]."""
    slope, intercept = B[0], B[1]
    return slope * x + intercept
# ODR fits of B against log slip rate
print(slip_rates)
print(np.log10(slip_rates))
print(slip_rate_stds)
print(np.log10(slip_rate_stds))
print(burstiness_stds)
# NOTE(review): wd and we (inverse-variance weights) are computed but never
# passed to the fit below -- confirm whether they were meant to be used.
wd = 1./np.power(burstiness_stds, 2)
print(wd)
we = 1./np.power(slip_rate_stds, 2)
print(we)
# Std dev already in log-space
data = odrpack.RealData(np.log10(slip_rates), mean_bs,
                        sx=np.sqrt(slip_rate_stds), sy=np.sqrt(burstiness_stds))
linear = odrpack.Model(f)
odr = odrpack.ODR(data, linear, beta0=[-1, -1.0,])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
xvals = np.arange(1.e-4, 1e2, 1e-2)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
pyplot.plot(xvals, yrng, c='0.6')
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}'.format(a, b)
print(txt)
ax.annotate(txt, (1e0, 0.9), color='0.6')
# Now try bilinear fixed hinge (hxfix reused from the long-term rate section)
# NOTE(review): beta0 has 3 elements here but only 2 in the earlier call to the
# same fixed-hinge model -- verify against bilinear_reg_fix_zero_slope.
bilin = odrpack.Model(bilinear_reg_fix_zero_slope)
odr = odrpack.ODR(data, bilin, beta0=[-1, -1.0, -1])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='0.2')
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}, x < {:4.2f}'.format(a, b, np.power(10,hxfix))
print(txt)
ax.annotate(txt, (2e-3, 0.9), color='0.2')
txt = 'Y = {:4.2f}, x >= {:4.2f}'.format(ylevel, np.power(10,hxfix))
print(txt)
ax.annotate(txt, (1.2e-2, 0.8), color='0.2')
figname = 'burstiness_vs_slip_rate_%s.png' % fig_comment
pyplot.savefig(figname)
figname = 'burstiness_vs_slip_rate_%s.pdf' % fig_comment
pyplot.savefig(figname)
# Plot memory coefficients against long term rates
# Only faults with at least min_num_events_mem events (index set 'inds') are shown
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems = []
mean_ltr_mem = mean_ltr[inds]
ltr_bounds_mem = ltr_bounds.T[inds].T
for i, mem_set in enumerate(memory_coefficients):
    mean_mem = np.mean(mem_set)
#    print('Mean memory coefficient combined', mean_mem)
    mean_mems.append(mean_mem)
mean_mems = np.array(mean_mems)
colours = []
plot_colours_mem = list(np.array(plot_colours)[inds])
for mean_mem in mean_mems:
    if mean_mem <= -0.05:
        colours.append('b')
    elif mean_mem > -0.05 and mean_mem <= 0.05:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
                xerr = ltr_bounds_mem,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
                yerr = memory_bounds_min,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr_mem, mean_mems[inds], marker = 's', c=plot_colours_mem,
               s=25, zorder=2)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], mean_mems[i]),
                    fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M')
figname = 'memory_coefficient_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman Rank (lag-1) coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems_L1 = []
for i, mem_set in enumerate(memory_spearman_coefficients):
    mean_mem = np.mean(mem_set)
    mean_mems_L1.append(mean_mem)
colours = []
for mean_mem in mean_mems_L1:
    if mean_mem <= -0.05:
        colours.append('b')
    elif mean_mem > -0.05 and mean_mem <= 0.05:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_ltr, mean_mems_L1,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, mean_mems_L1,
                yerr = memory_spearman_bounds,
                elinewidth=0.7,
                ecolor = '0.3',
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, mean_mems_L1, marker = 's', c=plot_colours,
               s=25, zorder=2)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], mean_mems_L1[i]),
                    fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M (Spearman Rank)')
figname = 'memory_coefficient_Spearman_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman Rank (Lag-2) coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems_L2 = []
for i, mem_set in enumerate(memory_spearman_lag2_coef):
    mean_mem = np.mean(mem_set)
    mean_mems_L2.append(mean_mem)
colours = []
for mean_mem in mean_mems_L2:
    if mean_mem <= -0.05:
        colours.append('b')
    elif mean_mem > -0.05 and mean_mem <= 0.05:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_ltr, mean_mems_L2,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, mean_mems_L2,
                yerr = memory_spearman_lag2_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, mean_mems_L2, marker = 's', c=plot_colours,
               s=25, zorder=2)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], mean_mems_L2[i]),
                    fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M (Spearman Rank Lag-2)')
figname = 'memory_coefficient_Spearman_Lag2_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman rank Lag-1 against Lag-2 memory coefficients
pyplot.clf()
ax = pyplot.subplot(111)
colours = []
for mean_mem in mean_mems_L1:
    if mean_mem <= -0.05:
        colours.append('b')
    elif mean_mem > -0.05 and mean_mem <= 0.05:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_mems_L1, mean_mems_L2,
                xerr = memory_spearman_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_mems_L1, mean_mems_L2,
                yerr = memory_spearman_lag2_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_mems_L1, mean_mems_L2, marker = 's', c=plot_colours,
               s=25, zorder=2)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_mems_L1[i], mean_mems_L2[i]),
                    fontsize=8)
ax.set_xlabel('M (Spearman Rank Lag-1)')
ax.set_ylabel('M (Spearman Rank Lag-2)')
figname = 'memory_coefficient_Spearman_L1_vs_L2_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot COV against number of events to look at sampling biases
pyplot.clf()
ax = pyplot.subplot(111)
mean_covs = []
for i, cov_set in enumerate(covs):
    mean_cov = np.mean(cov_set)
    mean_covs.append(mean_cov)
# 'colours' recomputed here is also reused by the basic-statistics plots below
colours = []
for mean_cov in mean_covs:
    if mean_cov <= 0.9:
        colours.append('b')
    elif mean_cov > 0.9 and mean_cov <= 1.1:
        colours.append('g')
    else:
        colours.append('r')
pyplot.errorbar(mean_covs, num_events,
                xerr = cov_bounds,
                ecolor = '0.6',
                linestyle="None")
pyplot.scatter(mean_covs, num_events, marker = 's', c=plot_colours, s=25)
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_covs[i], num_events[i]),
                    fontsize=8)
ax.set_xlabel('COV')
ax.set_ylabel('Number of events in earthquake record')
figname = 'mean_cov_vs_number_events_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot basic statistics: minimum vs maximum inter-event time
# NOTE(review): the scatter uses 'colours' (COV classes from the previous
# section), not plot_colours -- confirm this is intentional.
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(max_interevent_times, min_interevent_times,
                yerr = min_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(max_interevent_times, min_interevent_times,
                xerr = max_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(max_interevent_times, min_interevent_times,
               marker = 's', c=colours, s=25, zorder=2)
ax.set_xlabel('Maximum interevent time')
ax.set_ylabel('Minimum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (max_interevent_times[i], min_interevent_times[i]),
                    fontsize=8)
# Linear fit only bottom end of data (max interevent time < 10 kyr)
indices = np.argwhere(max_interevent_times < 10000).flatten()
indices_slow_faults = np.argwhere(max_interevent_times >= 10000).flatten()
lf = np.polyfit(np.log10(max_interevent_times[indices]),
                np.log10(min_interevent_times[indices]), 1)
xvals_short = np.arange(100, 1e4, 100)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (800, 10000))
figname = 'min_vs_max_interevent_time_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot minimum pairs: mean of the two shortest consecutive inter-event times
# against the maximum inter-event time
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(max_interevent_times, min_paired_interevent_times,
                yerr = min_paired_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(max_interevent_times, min_paired_interevent_times,
                xerr = max_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(max_interevent_times, min_paired_interevent_times,
               marker = 's', c=colours, s=25, zorder=2)
ax.set_xlabel('Maximum interevent time')
ax.set_ylabel('Minimum interevent time \n(mean of two shortest consecutive interevent times)')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (max_interevent_times[i], min_paired_interevent_times[i]),
                    fontsize=8)
# Now fit with a regression in log-log space
xvals = np.arange(100, 2e6, 100) # For plotting
# Linear fit over all data
lf = np.polyfit(np.log10(max_interevent_times),
                np.log10(min_paired_interevent_times), 1)
log_yvals = lf[0]*np.log10(xvals) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals, yvals)
# Linear fit only bottom end of data
indices = np.argwhere(max_interevent_times < 10000).flatten()
lf = np.polyfit(np.log10(max_interevent_times[indices]),
                np.log10(min_paired_interevent_times[indices]), 1)
xvals_short = np.arange(100, 1e4, 100)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (100, 10000))
# Quadratic fit in log-log space over all data
qf = np.polyfit(np.log10(max_interevent_times),
                np.log10(min_paired_interevent_times), 2)
print(qf)
log_yvals = qf[0]*np.log10(xvals)**2 + qf[1]*np.log10(xvals) + qf[2]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals, yvals)
figname = 'min_pair_vs_max_interevent_time_%s.png' % fig_comment
pyplot.savefig(figname)
# Similar plots, against long term rates: first minimum inter-event time
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, min_interevent_times,
                yerr = min_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, min_interevent_times,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, min_interevent_times,
               marker='s', c=colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], min_interevent_times[i]),
                    fontsize=8)
# Linear fit only bottom end of data (faster faults, rate > 2e-4)
indices = np.argwhere(mean_ltr > 2e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
                np.log10(min_interevent_times[indices]), 1)
xvals_short = np.arange(5e-4, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
ax.annotate(txt, (1e-4, 10000))
figname = 'min_interevent_time_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot long term rate against minimum pair inter-event time
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, min_paired_interevent_times,
                yerr = min_paired_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, min_paired_interevent_times,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, min_paired_interevent_times,
               marker='s', c=colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time \n(mean of two shortest consecutive interevent times)')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], min_paired_interevent_times[i]),
                    fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
                np.log10(min_paired_interevent_times[indices]), 1)
xvals_short = np.arange(5e-4, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 10000))
figname = 'min_pair_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot long term rate against maximum interevent time
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, max_interevent_times,
                yerr = max_interevent_times_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, max_interevent_times,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, max_interevent_times,
               marker='s', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], max_interevent_times[i]),
                    fontsize=8)
# Linear fit only bottom end of data (threshold set low enough to use all data)
indices = np.argwhere(mean_ltr > 2e-10).flatten() # All data for now
lf = np.polyfit(np.log10(mean_ltr[indices]),
                np.log10(max_interevent_times[indices]), 1)
xvals_short = np.arange(2e-6, 1e-2, 1e-6)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 100000))
figname = 'max_interevent_time_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot ratios against long term rates
# (ratio of the minimum-pair interevent time to the maximum interevent time)
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, ratio_min_pair_max,
                yerr = ratio_min_pair_max_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, ratio_min_pair_max,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, ratio_min_pair_max,
               marker='s', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum pair interevent time: maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], ratio_min_pair_max[i]),
                    fontsize=8)
# Linear fit high and low long term rate data separately
# (faults are split at a rate of 4e-4 events/yr; each side gets its own fit)
indices = np.argwhere(mean_ltr > 4e-4).flatten()
indices_slow_faults = np.argwhere(mean_ltr <= 4e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
                np.log10(ratio_min_pair_max[indices]), 1)
xvals_short = np.arange(2e-4, 5e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='k')
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (5e-4, 1e-2))
# Slow long-term rates
print('At if statement')
if len(indices_slow_faults) > 0:
    print('Plotting slow faults')
    lf = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
                    np.log10(ratio_min_pair_max[indices_slow_faults]), 1)
    xvals_short = np.arange(2e-6, 4e-4, 1e-6)
    log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
    yvals = np.power(10, log_yvals)
    pyplot.plot(xvals_short, yvals, c='k')
    # Add formula for linear fit to low-end of data
    txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
    print(txt)
    ax.annotate(txt, (1e-5, 5e-3))
figname = 'min_pair_max_ratio_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot ratios against long term rates
# (ratio of the single minimum interevent time to the maximum interevent time)
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, ratio_min_max,
                yerr = ratio_min_max_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, ratio_min_max,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.7,
                linestyle="None", zorder=1)
pyplot.scatter(mean_ltr, ratio_min_max,
               marker = 's', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time: maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
    if max_interevent_times[i] > 10 and annotate_plots:
        ax.annotate(txt[:4],
                    (mean_ltr[i], ratio_min_max[i]),
                    fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 9e-5).flatten()
indices_slow_faults = np.argwhere(mean_ltr <= 9e-5).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
                np.log10(ratio_min_max[indices]), 1)
# Now just plot as constant mean value
# (the fitted slope is discarded: slope is forced to zero so the plotted
# "fit" is simply the mean of the log-ratios for the fast faults)
lf[0] = 0
lf[1] = np.mean(np.log10(ratio_min_max[indices]))
xvals_short = np.arange(3.46e-5, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='k')
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = {:4.2f}Log(x) {:=+6.2f}'.format(lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 1e-3))
# Slow long-term rates
if len(indices_slow_faults) > 0:
    lf = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
                    np.log10(ratio_min_max[indices_slow_faults]), 1)
    xvals_short = np.arange(2e-6, 3.47e-5, 1e-6)
    log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
    yvals = np.power(10, log_yvals)
    pyplot.plot(xvals_short, yvals, c='k')
    # Add formula for linear fit to low-end of data
    # txt = 'Log(Y) = %.2fLog(x) %+.2f' % (lf[0], lf[1])
    txt = 'Log(Y) = {:4.2f} {:=+6.2f}'.format(lf[0], lf[1])
    print(txt)
    ax.annotate(txt, (3e-6, 8e-1))
figname = 'min_max_ratio_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
#############################################
# Make multipanel figure plot
pyplot.clf()
fig = pyplot.figure(1)
# set up subplot grid
gridspec.GridSpec(3, 2)
#First plot
pyplot.subplot2grid((3, 2), (0,0), colspan=1, rowspan=1)
ax = pyplot.gca()
# Plot burstiness against mean ltr
pyplot.errorbar(mean_ltr, mean_bs,
                xerr = ltr_bounds,
                ecolor = '0.3',
                elinewidth=0.5,
                linestyle="None",
                zorder=1)
pyplot.errorbar(mean_ltr, mean_bs,
                yerr = burstiness_bounds,
                ecolor = '0.3',
                elinewidth=0.5,
                linestyle="None",
                zorder=1)
pyplot.scatter(mean_ltr, mean_bs, marker = 's', c=plot_colours,
               s=18, zorder=2)
ax.set_ylim([-1, 1])
ax.set_xlim([1./1000000, 1./40])
# Dashed reference line at B = 0 (boundary between regular and bursty)
pyplot.plot([1./1000000, 1./40], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)', fontsize=10)
ax.set_ylabel('B', fontsize=10)
# Add a legend using some dummy data
# (the dummy points at (1, 100) fall outside the axis limits, so only the
# legend entries are visible)
line1 = ax.scatter([1], [100], marker = 's', c = 'r', s=18)
line2 = ax.scatter([1], [100], marker = 's', c = 'g', s=18)
line3 = ax.scatter([1], [100], marker = 's', c = 'b', s=18)
pyplot.legend((line1, line2, line3), ('Normal', 'Strike slip', 'Reverse'))
hxfix = | np.log10(2e-4) | numpy.log10 |
import numpy as np
def prn_list(l):
    """Print each element of *l* on its own line, preceded by its index."""
    for idx, item in enumerate(l):
        print(idx, item)
# Time-series loop: build a demo series p = [1, 2, ..., 100]
p = []
for t in range(100):
    p.append(t+1)
# History window hln: apply one operation to each hln-long segment.
# Use the data in [0:t] to predict the value at t+1, stored at index t of the new list.
# e.g. the data at t=[0:14] predicts the value at t=15, stored at index t=14.
hln = 15
p1 = []
for t in range(hln, len(p)+1):
    tmp = p[t-hln:t]
    p1.append(tmp)
#len(p1)=len(p)-hln+1
p2 = [np.mean(p[t-hln:t]) for t in range(hln, len(p)+1)]
# fill the leading historical positions with zero or NaN
p3 = [np.nan for t in range(0, hln-1)] + p2
# Future window ftn: apply one operation to each ftn-long segment.
# Use the data in [t+1:t+ftn] to compute the value at t+1, stored at index t of the new list.
# e.g. the data at t=[1:15] computes the value at t=1, stored at index t=0.
ftn = 25
f1 = []
for t in range(1, len(p)-ftn+1):
    tmp = p[t:t+ftn]
    f1.append(tmp)
#len(f1)=len(p)-ftn+1
f2 = [np.mean(p[t:t+ftn]) for t in range(1, len(p)-ftn+1)]
f3 = f2 + [np.nan for t in range(0, ftn)]
# Combined history hln and future ftn: one operation per segment.
# Use [0:t] to predict the value at t+1, stored at index t of the new list,
# while also using [t+1:t+ftn] to compute the value at t+1, stored at the same index.
# e.g. data in [0:14] and [15:39] compute the value at t=15, stored at index t=14.
hln = 15
ftn = 25
pf1 = []
for t in range(hln, len(p)-ftn+1):
    tmp = p[t-hln:t+ftn]
    pf1.append(tmp)
#len(pf1)=len(p)-hln-ftn+1
pf2 = [np.mean(p[t-hln:t+ftn]) for t in range(hln, len(p)-ftn+1)]
pf3 = [np.nan for t in range(0, hln-1)] + \
      pf2 + \
      [np.nan for t in range(0, ftn)]
# find and remove nan from list
pf4 = []
for t in range(len(pf3)):
if ~ | np.isnan(pf3[t]) | numpy.isnan |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):

    """
    Space group

    Every space group object used by this module is instantiated below;
    other modules should look them up through the dictionary
    space_groups instead of constructing their own instances.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the space group number assigned by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: the space group transformations, each a
                                tuple of three integer arrays
                                (rot, tn, td): rot is the rotation
                                matrix, tn/td are the numerator and
                                denominator of the translation vector.
                                All transformations are expressed in
                                fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Precompute the transposed rotations and the per-transformation
        # phase factors so that symmetryEquivalentMillerIndices() reduces
        # to one dot product and one element-wise power.
        transposed = []
        exponents = []
        for rot, t_num, t_den in transformations:
            transposed.append(N.transpose(rot))
            exponents.append((-2j * N.pi * t_num) / t_den)
        self.transposed_rotations = N.array(transposed)
        self.phase_factors = N.exp(N.array(exponents))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 whose length equals the number of space group
                 transformations. miller_indices holds the Miller
                 indices of every reflection equivalent by symmetry to
                 hkl (hkl itself is the first entry); phase_factor holds
                 the factor to apply to the structure factor of hkl to
                 obtain that of the equivalent reflection.
        :rtype: tuple
        """
        equivalents = N.dot(self.transposed_rotations, hkl)
        phases = N.multiply.reduce(self.phase_factors ** hkl, -1)
        return equivalents, phases
# Registry mapping both the space-group number and its Hermann-Mauguin
# symbol to the same SpaceGroup instance.
space_groups = {}
# Space group 1 (P 1): identity only.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
# Space group 2 (P -1): identity plus inversion.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
# Space group 3, 'P 1 2 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
# Space group 4, 'P 1 21 1' (screw translation (0, 1/2, 0)).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
# Space group 5, 'C 1 2 1' (last two operations repeat the first two
# with the (1/2, 1/2, 0) centring translation added).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
# Space group 6, 'P 1 m 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
# Space group 7, 'P 1 c 1' (glide translation (0, 0, 1/2)).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
# Space group 8, 'C 1 m 1' (C-centred: second pair of operations adds
# the (1/2, 1/2, 0) translation).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
# Space group 9, 'C 1 c 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
# Space group 10, 'P 1 2/m 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
# Space group 11, 'P 1 21/m 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
# Space group 12, 'C 1 2/m 1' (second half repeats the first four
# operations with the (1/2, 1/2, 0) centring translation).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
# Space group 13, 'P 1 2/c 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
# Space group 14, 'P 1 21/c 1'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
# Space group 15, 'C 1 2/c 1' (second half repeats the first four
# operations with the (1/2, 1/2, 0) centring translation).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
# Space group 16, 'P 2 2 2'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
# Space group 17, 'P 2 2 21'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
# Space group 18, 'P 21 21 2'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
# Space group 19, 'P 21 21 21'.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
# Space group 20, 'C 2 2 21' (second half repeats the first four
# operations with the (1/2, 1/2, 0) centring translation).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
# Space group 21, 'C 2 2 2' (second half repeats the first four
# operations with the (1/2, 1/2, 0) centring translation).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
# Space group 22, 'F 2 2 2' (face-centred: the four base operations are
# repeated with the (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) translations).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
# Space group 24 (I 21 21 21): four 2-fold screw operations, each also
# combined with the body-centering translation.  Ops are stored as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
# Space group 25 (P m m 2): identity, 2-fold rotation about z, and the two
# mirrors; all translations are zero.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
# Space group 26 (P m c 21): 2-fold screw along z and a c-glide, plus the
# identity and a mirror.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
# Space group 27 (P c c 2): two c-glides perpendicular to x and y, plus the
# identity and a 2-fold rotation about z.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
# Space group 28 (P m a 2): a-glide translations of (1/2, 0, 0) on the two
# reflection-type operations.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,0], [2,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
# Space group 29 (P c a 21): 2-fold screw along z plus c- and a-glides.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,1], [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,0], [2,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
# Space group 30 (P n c 2): n- and c-glide translations of (0, 1/2, 1/2) on
# the reflection-type operations.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,1], [1,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
# Space group 31 (P m n 21): screw rotation and n-glide with translation
# (1/2, 0, 1/2).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
# Space group 32 (P b a 2): b- and a-glide translations of (1/2, 1/2, 0).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
# Space group 33 (P n a 21): screw rotation plus n- and a-glides.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
# Space group 34 (P n n 2): two n-glides with translation (1/2, 1/2, 1/2).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
# Space group 35 (C m m 2): the four point operations, repeated with the
# C-centering translation (1/2, 1/2, 0).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
# Space group 36 (C m c 21): screw/glide operations, repeated with the
# C-centering translation (1/2, 1/2, 0).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
# Space group 37 (C c c 2): c-glide operations, repeated with the
# C-centering translation (1/2, 1/2, 0).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
# Space group 38 (A m m 2): the four point operations, repeated with the
# A-centering translation (0, 1/2, 1/2).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,1], [1,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
# Space group 39 (A b m 2): b-glide operations, repeated with the
# A-centering translation.  NOTE: the last two entries keep the original
# unreduced fraction (0, 1/1, 1/2) from the generated table, verbatim.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,0], [1,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,0], [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
# Space group 40 (A m a 2): a-glide operations, repeated with the
# A-centering translation (0, 1/2, 1/2).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
# Space group 41 (A b a 2): glide operations, repeated with the A-centering
# translation.  NOTE: the last two entries keep the original unreduced
# fraction (1/2, 1/1, 1/2) from the generated table, verbatim.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
# Space group 42 (F m m 2): the four point operations combined with each of
# the four face-centering translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2),
# (1/2,1/2,0) — 16 operations in all.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,0,1], [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,1], [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,1], [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
# Space group 43 (F d d 2): d-glide operations (quarter-cell translations)
# combined with the four face-centering translations — 16 operations.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [4,4,4]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,3,3], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,3,3], [4,4,4]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,0,1], [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [3,1,3], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [3,1,3], [4,4,4]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [3,3,1], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [3,3,1], [4,4,4]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
# Space group 44 (I m m 2): the four point operations, repeated with the
# body-centering translation (1/2, 1/2, 1/2).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
# Space group 45 (I b a 2): glide operations, repeated with the
# body-centering translation.  NOTE: the last two entries keep the original
# unreduced fraction (1/2, 1/2, 1/1) from the generated table, verbatim.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
# Space group 46 (I m a 2): a-glide operations, repeated with the
# body-centering translation.  NOTE: the last two entries keep the original
# unreduced fraction (1/1, 1/2, 1/2) from the generated table, verbatim.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [1,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
# Space group 47 (P m m m): the full mmm point group — identity, three
# 2-fold rotations, inversion, and three mirrors — all with zero translation.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
# Space group 48 (P n n n, origin choice 2): rotations with half-cell glide
# translations; the inversion-related half uses the negated translations
# exactly as in the generated table (negative numerators preserved).
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
# Space group 49 (P c c m): c-glide translations (0, 0, ±1/2) on the four
# glide-type operations; inversion-related half uses negated translations
# exactly as in the generated table.
transformations = []
for _r, _n, _d in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],  [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
# Space group 50 ('P b a n :2'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
# Space group 51 ('P m m a'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
# Space group 52 ('P n n a'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
# Space group 53 ('P m n a'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
# Space group 54 ('P c c a'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
# Space group 55 ('P b a m'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
# Space group 56 ('P c c n'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
# Space group 57 ('P b c m'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
# Space group 58 ('P n n m'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
# Space group 59 ('P m m n :2'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
# Space group 60 ('P b c n'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,-1], [2,2,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
# Space group 61 ('P b c a'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
# Space group 62 ('P n m a'): each entry is (flattened 3x3 rotation,
# translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
# Space group 63 ('C m c m'): 16 operations (8 point operations plus the
# same 8 shifted by the C-centering vector). Each entry is
# (flattened 3x3 rotation, translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,-1], [2,2,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
# Space group 64 ('C m c a'): 16 operations (8 point operations plus the
# same 8 shifted by the C-centering vector). Translation sums such as
# (1, 1/2, 1/2) are kept unreduced, exactly as generated. Each entry is
# (flattened 3x3 rotation, translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,-1], [1,2,2]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
# Space group 65 ('C m m m'): 16 operations (8 point operations plus the
# same 8 shifted by the C-centering vector). Each entry is
# (flattened 3x3 rotation, translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
# Space group 66 ('C c c m'): 16 operations (8 point operations plus the
# same 8 shifted by the C-centering vector). Each entry is
# (flattened 3x3 rotation, translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
# Space group 67 ('C m m a'): 16 operations (8 point operations plus the
# same 8 shifted by the C-centering vector). Translation sums such as
# (1, 1/2, 0) are kept unreduced, exactly as generated. Each entry is
# (flattened 3x3 rotation, translation numerators, translation denominators).
transformations = []
seitz_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
]
for flat_rot, num, den in seitz_ops:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
# Space group 68 (C c c a :2, origin choice 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation vector is num/den.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
# Space group 69 (F m m m): the eight mmm point-group rotations combined
# with each of the four F-centering translations, in that order.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
centerings = [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]
for num, den in centerings:
    for rot_elems in point_ops:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
# Space group 70 (F d d d :2, origin choice 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation vector is num/den.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
# Space group 71 (I m m m): the eight mmm point-group rotations combined
# with the identity translation and the I-centering translation (1/2,1/2,1/2).
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for num, den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for rot_elems in point_ops:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
# Space group 72 (I b a m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation vector is num/den.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
# Space group 73 (I b c a).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation vector is num/den.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
# Space group 74 (I m m a).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation vector is num/den.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
# Space group 75 (P 4): the four rotations of the 4 point group, no
# translations. Each operation is (rotation, translation num, denominator).
transformations = []
for rot_elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
# Space group 76 (P 41): fourfold screw axis, c-translations 0, 1/4, 3/4, 1/2.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
# Space group 77 (P 42): fourfold screw axis with c-translation 1/2.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
# Space group 78 (P 43): fourfold screw axis, c-translations 0, 3/4, 1/4, 1/2.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
# Space group 79 (I 4): the four rotations of the 4 point group combined
# with the identity translation and the I-centering translation (1/2,1/2,1/2).
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]
for num, den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for rot_elems in point_ops:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
# Space group 80 (I 41): fourfold screw operations plus their I-centered
# counterparts. Each operation is (rotation, translation num, denominator).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
# Space group 99 ('P 4 m m'): build the (rotation, translation-numerator,
# translation-denominator) tuples from a compact table instead of the
# fully expanded statement sequence.  The fractional translation of each
# symmetry operation is trans_num / trans_den.
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
# Space group 100 ('P 4 b m'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,0], [2,2,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
# Space group 101 ('P 42 c m'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
# Space group 102 ('P 42 n m'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
# Space group 103 ('P 4 c c'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
# Space group 104 ('P 4 n c'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
# Space group 105 ('P 42 m c'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
# Space group 106 ('P 42 b c'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
# Space group 107 ('I 4 m m'): body-centred group — the second half of the
# table repeats the first eight rotations shifted by the (1/2,1/2,1/2)
# centring translation.  Each row is (flat 3x3 rotation, numerators,
# denominators) of one symmetry operation.
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
# Space group 108 ('I 4 c m'): body-centred group; sixteen operations,
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,1], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,1], [2,2,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
# Space group 109 ('I 41 m d'): body-centred group; sixteen operations,
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,5], [1,2,4]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
# Space group 110 ('I 41 c d'): body-centred group; sixteen operations,
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,0,1], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,0,1], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,3], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,3], [1,2,4]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
# Space group 111 ('P -4 2 m'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,0], [1,1,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
# Space group 112 ('P -4 2 c'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,1], [1,1,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
# Space group 113 ('P -4 21 m'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,0], [2,2,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# Space group 114 ('P -4 21 c'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,1], [2,2,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# Space group 115 ('P -4 m 2'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,0], [1,1,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# Space group 116 ('P -4 c 2'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,1], [1,1,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# Space group 117 ('P -4 b 2'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,0], [2,2,1]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# Space group 118 ('P -4 n 2'): symmetry operations as a compact table;
# each row is (flat 3x3 rotation, translation numerators, denominators).
transformations = []
for mat9, num3, den3 in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(mat9)
    rot.shape = (3, 3)
    trans_num = N.array(num3)
    trans_den = N.array(den3)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# Space group 119 (I -4 m 2), body-centred tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples; the second half repeats the point-group operations shifted by the
# I-centring translation (1/2, 1/2, 1/2).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# Space group 120 (I -4 c 2), body-centred tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (translations are not reduced
# modulo the lattice).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,1], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# Space group 121 (I -4 2 m), body-centred tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples; the second half repeats the point-group operations shifted by the
# I-centring translation (1/2, 1/2, 1/2).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# Space group 122 (I -4 2 d), body-centred tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (translations are not reduced
# modulo the lattice).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1],     [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,5], [1,2,4]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
# Space group 123 (P 4/m m m), primitive tetragonal.
# A symmorphic group: all 16 operations are pure rotations/reflections with
# zero translation. Each entry is a (rotation, translation numerator,
# translation denominator) triple.
transformations = []
for rot_elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
# Space group 124 (P 4/m c c), primitive tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (including negative numerators).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
# Space group 125 (P 4/n b m :2), primitive tetragonal, origin choice 2.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (including negative numerators).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [1,0,0],   [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,0,0],   [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [-1,0,0],  [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,-1,0],  [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,-1,0],  [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [-1,0,0],  [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],    [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,0],   [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
# Space group 126 (P 4/n n c :2), primitive tetragonal, origin choice 2.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (including negative numerators).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [1,0,0],    [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,1,0],    [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,1,1],    [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1],    [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [-1,0,0],   [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,-1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],    [-1,0,-1],  [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],    [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,-1],   [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
# Space group 127 (P 4/m b m), primitive tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (including negative numerators).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0],   [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],     [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
# Space group 128 (P 4/m n c), primitive tetragonal.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (including negative numerators).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1],    [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],    [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],     [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
# Space group 129 (P 4/n m m :2), primitive tetragonal, origin choice 2.
# Operations are (rotation, translation numerator, translation denominator)
# triples, reproduced exactly as generated (including negative numerators).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],    [1,0,0],   [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],    [0,1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,0,0],   [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,1,0],   [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [-1,0,0],  [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,-1,0],  [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [-1,0,0],  [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,-1,0],  [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],    [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,0],   [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
# Space group 130 (P 4/n c c, origin choice 2): 16 symmetry operations, each a
# (flattened 3x3 rotation matrix, translation numerator, translation
# denominator) triple.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,0), (2,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,0), (1,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,0), (2,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (-1,0,-1), (2,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,-1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
# Space group 131 (P 42/m m c): 16 symmetry operations as (rotation,
# translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
# Space group 132 (P 42/m c m): 16 symmetry operations as (rotation,
# translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
# Space group 133 (P 42/n b c, origin choice 2): 16 symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,0), (2,1,1)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
# Space group 134 (P 42/n n m, origin choice 2): 16 symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,-1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,-1), (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
# Space group 135 (P 42/m b c): 16 symmetry operations as (rotation,
# translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
# Space group 136 (P 42/m n m): 16 symmetry operations as (rotation,
# translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,-1,-1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,-1,-1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
# Space group 137 (P 42/n m c, origin choice 2): 16 symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,0,0), (2,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Space group 138 (P 42/n c m, origin choice 2): 16 symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,0,-1), (2,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,-1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Space group 139 (I 4/m m m): 32 symmetry operations. The same 16 rotations
# appear twice — first with zero translation, then as the body-centred (I)
# copies shifted by (1/2, 1/2, 1/2).
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (1,0,0,0,1,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]
for _n, _d in (((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))):
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
# Head of space group 140 (I 4/m c m): the 16 primitive operations followed by
# the first nine body-centred copies; the remaining operations and the
# SpaceGroup registration continue below this chunk. Note the centred copies
# keep the generator's unreduced z translation (e.g. 1/2 + 1/2 = 1/1).
transformations = []
for _r, _n, _d in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
# Space group 141 (I 41/a m d :2).
# Each symmetry operation is a (rotation, translation-numerator,
# translation-denominator) triple; the translation is num/den in
# fractional coordinates.
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,3,1), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,3), (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,0), (1,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,3,1), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,3), (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,-3,-1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,-1,-3), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-3,-1), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-3), (4,4,4)),
    # body-centering partners (+1/2,+1/2,+1/2), translations as tabulated
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (3,5,3), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (3,3,5), (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (3,5,3), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,5), (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,-1,1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,-1), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,0,1), (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,-1,1), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1), (1,1,-1), (4,4,4)),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
# Space group 142 (I 41/a c d :2).
# Each symmetry operation is a (rotation, translation-numerator,
# translation-denominator) triple; the translation is num/den in
# fractional coordinates.
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,3,1), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,3), (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,0), (1,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,3,3), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,-3,-1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,-1,-3), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,0), (2,1,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-3,-3), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-1), (4,4,4)),
    # body-centering partners (+1/2,+1/2,+1/2), translations as tabulated
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (3,5,3), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (3,3,5), (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (3,5,5), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,3), (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,-1,1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,-1), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,-1,-1), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1), (1,1,1), (4,4,4)),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
# Space group 143 (P 3): identity plus the two threefold rotations,
# all with zero translation.
transformations = []
for _rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
# Space group 144 (P 31): threefold screw axis, screw translations
# of 1/3 and 2/3 along c.
transformations = []
for _rot, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,2), (1,1,3)),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
# Space group 145 (P 32): threefold screw axis, screw translations
# of 2/3 and 1/3 along c (enantiomorph of P 31).
transformations = []
for _rot, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,3)),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
# Space group 146 (R 3, hexagonal setting): the three threefold
# rotations combined with the rhombohedral centering translations
# (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3).
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
]
_shifts = [
    ((0,0,0), (1,1,1)),
    ((1,2,2), (3,3,3)),
    ((2,1,1), (3,3,3)),
]
for _num, _den in _shifts:
    for _rot in _rots:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
# Space group 147 (P -3): threefold rotations and their inversion
# partners, all with zero translation.
transformations = []
for _rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
# Space group 148 (R -3, hexagonal setting): the six point-group
# operations of -3 combined with the rhombohedral centering
# translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3).
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
]
_shifts = [
    ((0,0,0), (1,1,1)),
    ((1,2,2), (3,3,3)),
    ((2,1,1), (3,3,3)),
]
for _num, _den in _shifts:
    for _rot in _rots:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
# Space group 149 (P 3 1 2): threefold rotations plus twofold axes,
# all with zero translation.
transformations = []
for _rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,1,0,0,1,0,0,0,-1),
    (1,0,0,1,-1,0,0,0,-1),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
# Space group 150 (P 3 2 1): threefold rotations plus twofold axes,
# all with zero translation.
transformations = []
for _rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,-1),
    (-1,0,0,-1,1,0,0,0,-1),
    (0,1,0,1,0,0,0,0,-1),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
# Space group 151 (P 31 1 2): threefold screw axis with twofold axes;
# screw translations are thirds along c.
transformations = []
for _rot, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,2), (1,1,3)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,2), (1,1,3)),
    ((-1,1,0,0,1,0,0,0,-1), (0,0,1), (1,1,3)),
    ((1,0,0,1,-1,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
# Space group 152 (P 31 2 1): threefold screw axis with twofold axes;
# screw translations are thirds along c.
transformations = []
for _rot, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,2), (1,1,3)),
    ((1,-1,0,0,-1,0,0,0,-1), (0,0,2), (1,1,3)),
    ((-1,0,0,-1,1,0,0,0,-1), (0,0,1), (1,1,3)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
# Space group 153 (P 32 1 2): threefold screw axis with twofold axes;
# enantiomorph of P 31 1 2 (screw translations swapped).
transformations = []
for _rot, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,3)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,3)),
    ((-1,1,0,0,1,0,0,0,-1), (0,0,2), (1,1,3)),
    ((1,0,0,1,-1,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
# Space group 154 (P 32 2 1): threefold screw axis with twofold axes;
# enantiomorph of P 31 2 1 (screw translations swapped).
transformations = []
for _rot, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,3)),
    ((1,-1,0,0,-1,0,0,0,-1), (0,0,1), (1,1,3)),
    ((-1,0,0,-1,1,0,0,0,-1), (0,0,2), (1,1,3)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
# Space group 155 (R 3 2, hexagonal setting): the six point-group
# operations of 32 combined with the rhombohedral centering
# translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3).
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,-1),
    (-1,0,0,-1,1,0,0,0,-1),
    (0,1,0,1,0,0,0,0,-1),
]
_shifts = [
    ((0,0,0), (1,1,1)),
    ((1,2,2), (3,3,3)),
    ((2,1,1), (3,3,3)),
]
for _num, _den in _shifts:
    for _rot in _rots:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Space group 156 (P 3 m 1): threefold rotations plus mirror planes,
# all with zero translation.
transformations = []
for _rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (-1,1,0,0,1,0,0,0,1),
    (1,0,0,1,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Space group 157 (P 3 1 m): six point operations, all with zero translation.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Space group 158 (P 3 c 1): three pure rotations plus three operations
# combined with a (0, 0, 1/2) translation.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Space group 159 (P 3 1 c): three pure rotations plus three operations
# combined with a (0, 0, 1/2) translation.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Space group 160 (R 3 m :H): six point operations replicated under the
# three rhombohedral centering translations (0,0,0), (1/3,2/3,2/3),
# (2/3,1/3,1/3) — append order: all six ops per centering, in this order.
transformations = []
point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for num, den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for elems in point_ops:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161 (R 3 c :H): per centering, the first trio of operations
# carries the centering translation itself and the second trio carries the
# centering translation plus (0, 0, 1/2) (expressed as one fraction).
transformations = []
first_trio = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
]
second_trio = [
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for (num1, den1), (num2, den2) in [
    (([0,0,0], [1,1,1]), ([0,0,1], [1,1,2])),
    (([1,2,2], [3,3,3]), ([1,2,7], [3,3,6])),
    (([2,1,1], [3,3,3]), ([2,1,5], [3,3,6])),
]:
    for elems in first_trio:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num1), N.array(den1)))
    for elems in second_trio:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num2), N.array(den2)))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162 (P -3 1 m): twelve point operations, all with zero
# translation.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163 (P -3 1 c): twelve operations; two trios carry a
# half-period c translation (one written with numerator -1, as generated).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],   [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164 (P -3 m 1): twelve point operations, all with zero
# translation.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165 (P -3 c 1): twelve operations; two trios carry a
# half-period c translation (one written with numerator -1, as generated).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166 (R -3 m :H): twelve point operations replicated under
# the three rhombohedral centering translations — append order: all
# twelve ops per centering, in this order.
transformations = []
point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for num, den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for elems in point_ops:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167 (R -3 c :H): four trios of point operations; the shift
# list below gives one (numerator, denominator) translation per trio,
# cycling through the trios for each of the three centerings in turn.
transformations = []
trios = [
    [[1,0,0,0,1,0,0,0,1], [0,-1,0,1,-1,0,0,0,1], [-1,1,0,-1,0,0,0,0,1]],
    [[1,-1,0,0,-1,0,0,0,-1], [-1,0,0,-1,1,0,0,0,-1], [0,1,0,1,0,0,0,0,-1]],
    [[-1,0,0,0,-1,0,0,0,-1], [0,1,0,-1,1,0,0,0,-1], [1,-1,0,1,0,0,0,0,-1]],
    [[-1,1,0,0,1,0,0,0,1], [1,0,0,1,-1,0,0,0,1], [0,-1,0,-1,0,0,0,0,1]],
]
shifts = [
    ([0,0,0], [1,1,1]), ([0,0,1], [1,1,2]), ([0,0,0], [1,1,1]), ([0,0,-1], [1,1,2]),
    ([1,2,2], [3,3,3]), ([1,2,7], [3,3,6]), ([1,2,2], [3,3,3]), ([1,2,1], [3,3,6]),
    ([2,1,1], [3,3,3]), ([2,1,5], [3,3,6]), ([2,1,1], [3,3,3]), ([2,1,-1], [3,3,6]),
]
for i, (num, den) in enumerate(shifts):
    for elems in trios[i % 4]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168 (P 6): six point operations, all with zero translation.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169 (P 61): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
# Space group 170 (P 65): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
# Space group 171 (P 62): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
# Space group 172 (P 64): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
# Space group 173 (P 63): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
# Space group 174 (P -6): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
# Space group 175 (P 6/m): twelve rotations, all with zero translation.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
# Space group 176 (P 63/m): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
# Space group 177 (P 6 2 2): twelve rotations, all with zero translation.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
# Space group 178 (P 61 2 2): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,5], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,6]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
# Space group 179 (P 65 2 2): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,5], [1,1,6]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
# Space group 180 (P 62 2 2): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,3]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
# Space group 181 (P 64 2 2): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,2], [1,1,3]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
# Space group 182 (P 63 2 2): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
# Space group 183 (P 6 m m): twelve rotations, all with zero translation.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
# Space group 184 (P 6 c c): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
# Space group 185 (P 63 c m): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
# Space group 186 (P 63 m c): (flattened rotation, translation num, translation den).
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
# Space group 187 (P -6 m 2).
# Each symmetry operation is stored as (rot, trans_num, trans_den): rot is a
# 3x3 integer rotation matrix (row-major) and the fractional translation is
# trans_num/trans_den elementwise.  All translations are zero for this group.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
# Space group 188 (P -6 c 2).
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; half of the operations carry
# a (0, 0, 1/2) translation along c.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
# Space group 189 (P -6 2 m).
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; all translations are zero here.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
# Space group 190 (P -6 2 c).
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; half the operations carry a
# (0, 0, 1/2) translation along c.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
# Space group 191 (P 6/m m m), 24 symmetry operations.
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; this group is symmorphic, so
# every translation is zero.
for raw_rot in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
# Space group 192 (P 6/m c c), 24 symmetry operations.
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; operations with c-glide
# character carry a +-(0, 0, 1/2) translation.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
# Space group 193 (P 63/m c m), 24 symmetry operations.
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; screw/glide operations carry
# a +-(0, 0, 1/2) translation along c.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,0],  [1,1,1]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
# Space group 194 (P 63/m m c), 24 symmetry operations.
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; screw/glide operations carry
# a +-(0, 0, 1/2) translation along c.
for raw_rot, raw_num, raw_den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array(raw_num)
    trans_den = N.array(raw_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
# Space group 195 (P 2 3), 12 symmetry operations.
# Operations are (rot, trans_num, trans_den): 3x3 integer rotation plus a
# fractional translation trans_num/trans_den; this group is symmorphic, so
# every translation is zero.
for raw_rot in [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(raw_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
# Space group 197 (I 2 3): body-centred cubic group.  Its 24 operations are
# the 12 rotations of point group 2 3, each combined with the two lattice
# translations (0,0,0) and (1/2,1/2,1/2).  Every entry appended to
# `transformations` is (3x3 rotation matrix, translation numerators,
# translation denominators), matching the rest of this generated table.
transformations = []
_rot_elements = [
    [1,0,0,  0,1,0,  0,0,1],
    [0,0,1,  1,0,0,  0,1,0],
    [0,1,0,  0,0,1,  1,0,0],
    [0,-1,0, 0,0,-1, 1,0,0],
    [0,0,1,  -1,0,0, 0,-1,0],
    [0,-1,0, 0,0,1,  -1,0,0],
    [0,0,-1, -1,0,0, 0,1,0],
    [0,0,-1, 1,0,0,  0,-1,0],
    [0,1,0,  0,0,-1, -1,0,0],
    [1,0,0,  0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0,  0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,1],
]
# Order matters: all 12 primitive operations first, then the 12 centred ones.
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _elems in _rot_elements:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
# Space group 198 (P 21 3): primitive cubic group of 12 operations.  The
# two-fold axes are 2_1 screw axes, so the non-trivial rotations carry
# half translations.  Each table row is (rotation elements, translation
# numerators, translation denominators).
transformations = []
_ops = [
    ([1,0,0,  0,1,0,  0,0,1],  [0,0,0], [1,1,1]),
    ([0,0,1,  1,0,0,  0,1,0],  [0,0,0], [1,1,1]),
    ([0,1,0,  0,0,1,  1,0,0],  [0,0,0], [1,1,1]),
    ([0,-1,0, 0,0,-1, 1,0,0],  [1,0,1], [2,1,2]),
    ([0,0,1,  -1,0,0, 0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0, 0,0,1,  -1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1, -1,0,0, 0,1,0],  [1,0,1], [2,1,2]),
    ([0,0,-1, 1,0,0,  0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,  0,0,-1, -1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,  0,-1,0, 0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0,  0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,1], [2,1,2]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
# Space group 199 (I 21 3): body-centred cubic group of 24 operations.
# The first 12 rows are the primitive (P-lattice) operations; the second
# 12 repeat the same rotations shifted by the I-centring vector
# (1/2,1/2,1/2).  NOTE: the centred translations are stored unreduced,
# exactly as in the generated reference table — components equal to 1
# (e.g. numerators [1,1,1] over denominators [2,1,2] = (1/2, 1, 1/2))
# are kept rather than folded back to 0.
transformations = []
_ops = [
    ([1,0,0,  0,1,0,  0,0,1],  [0,0,0], [1,1,1]),
    ([0,0,1,  1,0,0,  0,1,0],  [0,0,0], [1,1,1]),
    ([0,1,0,  0,0,1,  1,0,0],  [0,0,0], [1,1,1]),
    ([0,-1,0, 0,0,-1, 1,0,0],  [0,1,0], [1,2,1]),
    ([0,0,1,  -1,0,0, 0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0, 0,0,1,  -1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1, -1,0,0, 0,1,0],  [0,1,0], [1,2,1]),
    ([0,0,-1, 1,0,0,  0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,  0,0,-1, -1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,  0,-1,0, 0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0,  0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,1,0], [1,2,1]),
    ([1,0,0,  0,1,0,  0,0,1],  [1,1,1], [2,2,2]),
    ([0,0,1,  1,0,0,  0,1,0],  [1,1,1], [2,2,2]),
    ([0,1,0,  0,0,1,  1,0,0],  [1,1,1], [2,2,2]),
    ([0,-1,0, 0,0,-1, 1,0,0],  [1,1,1], [2,1,2]),
    ([0,0,1,  -1,0,0, 0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0, 0,0,1,  -1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1, -1,0,0, 0,1,0],  [1,1,1], [2,1,2]),
    ([0,0,-1, 1,0,0,  0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,  0,0,-1, -1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,  0,-1,0, 0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0, 0,1,0,  0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,1,2]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
# Space group 200 (P m -3): centrosymmetric primitive cubic group.  Its 24
# operations are the 12 rotations of point group 2 3 followed by the same
# matrices negated (i.e. composed with the inversion), in the same order;
# every operation has zero translation.
transformations = []
_rot_elements = [
    [1,0,0,  0,1,0,  0,0,1],
    [0,0,1,  1,0,0,  0,1,0],
    [0,1,0,  0,0,1,  1,0,0],
    [0,-1,0, 0,0,-1, 1,0,0],
    [0,0,1,  -1,0,0, 0,-1,0],
    [0,-1,0, 0,0,1,  -1,0,0],
    [0,0,-1, -1,0,0, 0,1,0],
    [0,0,-1, 1,0,0,  0,-1,0],
    [0,1,0,  0,0,-1, -1,0,0],
    [1,0,0,  0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0,  0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,1],
]
# Proper rotations first (_sign = 1), then the inversion-related ones
# (_sign = -1), preserving the original operation order.
for _sign in (1, -1):
    for _elems in _rot_elements:
        rot = N.array(_elems) * _sign
        rot.shape = (3, 3)
        trans_num = N.array([0,0,0])
        trans_den = N.array([1,1,1])
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
# Space group 201 (P n -3 :2, origin choice 2): centrosymmetric primitive
# cubic group of 24 operations.  The second half of the table is the first
# half composed with the inversion: rotation matrices and translation
# numerators are negated while the denominators are unchanged.  The
# negative numerators (e.g. [-1,-1,0]) are stored exactly as in the
# generated reference table.
transformations = []
_ops = [
    ([1,0,0,  0,1,0,  0,0,1],  [0,0,0], [1,1,1]),
    ([0,0,1,  1,0,0,  0,1,0],  [0,0,0], [1,1,1]),
    ([0,1,0,  0,0,1,  1,0,0],  [0,0,0], [1,1,1]),
    ([0,-1,0, 0,0,-1, 1,0,0],  [1,1,0], [2,2,1]),
    ([0,0,1,  -1,0,0, 0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0, 0,0,1,  -1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1, -1,0,0, 0,1,0],  [1,1,0], [2,2,1]),
    ([0,0,-1, 1,0,0,  0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,  0,0,-1, -1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,  0,-1,0, 0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0,  0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]
# _sign = 1 reproduces the 12 proper operations; _sign = -1 appends the
# 12 inversion-related ones (zero translations stay zero under negation).
for _sign in (1, -1):
    for _elems, _num, _den in _ops:
        rot = N.array(_elems) * _sign
        rot.shape = (3, 3)
        trans_num = N.array(_num) * _sign
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
# --- No. 202, 'F m -3' (continued) ---
# The `transformations` list and the current rot/trans_num/trans_den
# bindings were set up before this chunk; finish the pending entry first.
transformations.append((rot, trans_num, trans_den))

# The 24 rotation matrices (row-major 3x3 element lists) that recur for
# every centring coset of this group.
_rotations_202 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]

# Remaining rotations paired with translation (1/2, 0, 1/2); the leading
# entries of this coset lie before this chunk, so start at index 3.
for _elems in _rotations_202[3:]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([1,0,1])
    trans_den = N.array([2,1,2])
    transformations.append((rot, trans_num, trans_den))

# All 24 rotations paired with translation (1/2, 1/2, 0).
for _elems in _rotations_202:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([1,1,0])
    trans_den = N.array([2,2,1])
    transformations.append((rot, trans_num, trans_den))

# Register the finished group under both its number and its symbol.
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
# --- No. 203, 'F d -3 :2' ---
transformations = []

# The 24 rotation matrices (row-major 3x3 element lists), shared by all
# four centring cosets below.
_rotations_203 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]

# Per-coset fractional translations as (numerator, denominator) integer
# triples, one pair per rotation, in the same order as _rotations_203.
_translations_203 = [
    # coset 1
    [([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]),
     ([1,1,0],[4,4,1]), ([0,1,1],[1,4,4]), ([1,0,1],[4,1,4]),
     ([1,1,0],[4,4,1]), ([1,0,1],[4,1,4]), ([0,1,1],[1,4,4]),
     ([0,1,1],[1,4,4]), ([1,0,1],[4,1,4]), ([1,1,0],[4,4,1]),
     ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]),
     ([-1,-1,0],[4,4,1]), ([0,-1,-1],[1,4,4]), ([-1,0,-1],[4,1,4]),
     ([-1,-1,0],[4,4,1]), ([-1,0,-1],[4,1,4]), ([0,-1,-1],[1,4,4]),
     ([0,-1,-1],[1,4,4]), ([-1,0,-1],[4,1,4]), ([-1,-1,0],[4,4,1])],
    # coset 2
    [([0,1,1],[1,2,2]), ([0,1,1],[1,2,2]), ([0,1,1],[1,2,2]),
     ([1,3,1],[4,4,2]), ([0,3,3],[1,4,4]), ([1,1,3],[4,2,4]),
     ([1,3,1],[4,4,2]), ([1,1,3],[4,2,4]), ([0,3,3],[1,4,4]),
     ([0,3,3],[1,4,4]), ([1,1,3],[4,2,4]), ([1,3,1],[4,4,2]),
     ([0,1,1],[1,2,2]), ([0,1,1],[1,2,2]), ([0,1,1],[1,2,2]),
     ([-1,1,1],[4,4,2]), ([0,1,1],[1,4,4]), ([-1,1,1],[4,2,4]),
     ([-1,1,1],[4,4,2]), ([-1,1,1],[4,2,4]), ([0,1,1],[1,4,4]),
     ([0,1,1],[1,4,4]), ([-1,1,1],[4,2,4]), ([-1,1,1],[4,4,2])],
    # coset 3
    [([1,0,1],[2,1,2]), ([1,0,1],[2,1,2]), ([1,0,1],[2,1,2]),
     ([3,1,1],[4,4,2]), ([1,1,3],[2,4,4]), ([3,0,3],[4,1,4]),
     ([3,1,1],[4,4,2]), ([3,0,3],[4,1,4]), ([1,1,3],[2,4,4]),
     ([1,1,3],[2,4,4]), ([3,0,3],[4,1,4]), ([3,1,1],[4,4,2]),
     ([1,0,1],[2,1,2]), ([1,0,1],[2,1,2]), ([1,0,1],[2,1,2]),
     ([1,-1,1],[4,4,2]), ([1,-1,1],[2,4,4]), ([1,0,1],[4,1,4]),
     ([1,-1,1],[4,4,2]), ([1,0,1],[4,1,4]), ([1,-1,1],[2,4,4]),
     ([1,-1,1],[2,4,4]), ([1,0,1],[4,1,4]), ([1,-1,1],[4,4,2])],
    # coset 4
    [([1,1,0],[2,2,1]), ([1,1,0],[2,2,1]), ([1,1,0],[2,2,1]),
     ([3,3,0],[4,4,1]), ([1,3,1],[2,4,4]), ([3,1,1],[4,2,4]),
     ([3,3,0],[4,4,1]), ([3,1,1],[4,2,4]), ([1,3,1],[2,4,4]),
     ([1,3,1],[2,4,4]), ([3,1,1],[4,2,4]), ([3,3,0],[4,4,1]),
     ([1,1,0],[2,2,1]), ([1,1,0],[2,2,1]), ([1,1,0],[2,2,1]),
     ([1,1,0],[4,4,1]), ([1,1,-1],[2,4,4]), ([1,1,-1],[4,2,4]),
     ([1,1,0],[4,4,1]), ([1,1,-1],[4,2,4]), ([1,1,-1],[2,4,4]),
     ([1,1,-1],[2,4,4]), ([1,1,-1],[4,2,4]), ([1,1,0],[4,4,1])],
]

# Emit the 96 (rotation, numerator, denominator) triples coset by coset,
# preserving the original ordering exactly.
for _coset in _translations_203:
    for _elems, (_num, _den) in zip(_rotations_203, _coset):
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))

# Register the finished group under both its number and its symbol.
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
# --- No. 204, 'I m -3' ---
transformations = []

# The 24 rotation matrices (row-major 3x3 element lists); the group is
# body-centred, so the whole set recurs once per centring vector below.
_rotations_204 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]

# Translation (0,0,0) for the first pass, (1/2,1/2,1/2) for the second,
# each given as numerator/denominator integer triples.
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _elems in _rotations_204:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))

# Register the finished group under both its number and its symbol.
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
# --- No. 205, 'P a -3' ---
transformations = []

# The 24 rotation matrices (row-major 3x3 element lists).
_rotations_205 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]

# Fractional translations as (numerator, denominator) integer triples,
# one pair per rotation, in the same order as _rotations_205.
_translations_205 = [
    ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]),
    ([1,0,1],[2,1,2]), ([1,1,0],[2,2,1]), ([0,1,1],[1,2,2]),
    ([1,0,1],[2,1,2]), ([0,1,1],[1,2,2]), ([1,1,0],[2,2,1]),
    ([1,1,0],[2,2,1]), ([0,1,1],[1,2,2]), ([1,0,1],[2,1,2]),
    ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]), ([0,0,0],[1,1,1]),
    ([-1,0,-1],[2,1,2]), ([-1,-1,0],[2,2,1]), ([0,-1,-1],[1,2,2]),
    ([-1,0,-1],[2,1,2]), ([0,-1,-1],[1,2,2]), ([-1,-1,0],[2,2,1]),
    ([-1,-1,0],[2,2,1]), ([0,-1,-1],[1,2,2]), ([-1,0,-1],[2,1,2]),
]

# Emit the 24 (rotation, numerator, denominator) triples in the
# original order.
for _elems, (_num, _den) in zip(_rotations_205, _translations_205):
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))

# Register the finished group under both its number and its symbol.
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
transformations = []
# Space group 206 (I a -3): 24 operations of the point group T_h combined
# with glide translations, followed by the same 24 operations shifted by
# the body-centring translation (1/2, 1/2, 1/2).  Each entry is
# (flattened 3x3 rotation matrix, translation numerators, denominators).
for _elements, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,0,1,1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0), (0,1,0), (1,2,1)),
    ((0,0,1,-1,0,0,0,-1,0), (0,0,1), (1,1,2)),
    ((0,-1,0,0,0,1,-1,0,0), (1,0,0), (2,1,1)),
    ((0,0,-1,-1,0,0,0,1,0), (0,1,0), (1,2,1)),
    ((0,0,-1,1,0,0,0,-1,0), (1,0,0), (2,1,1)),
    ((0,1,0,0,0,-1,-1,0,0), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,-1,0,0), (0,-1,0), (1,2,1)),
    ((0,0,-1,1,0,0,0,1,0), (0,0,-1), (1,1,2)),
    ((0,1,0,0,0,-1,1,0,0), (-1,0,0), (2,1,1)),
    ((0,0,1,1,0,0,0,-1,0), (0,-1,0), (1,2,1)),
    ((0,0,1,-1,0,0,0,1,0), (-1,0,0), (2,1,1)),
    ((0,-1,0,0,0,1,1,0,0), (0,0,-1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,0), (2,1,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,0,1,1,0,0,0,1,0), (1,1,1), (2,2,2)),
    ((0,1,0,0,0,1,1,0,0), (1,1,1), (2,2,2)),
    ((0,-1,0,0,0,-1,1,0,0), (1,1,1), (2,1,2)),
    ((0,0,1,-1,0,0,0,-1,0), (1,1,1), (2,2,1)),
    ((0,-1,0,0,0,1,-1,0,0), (1,1,1), (1,2,2)),
    ((0,0,-1,-1,0,0,0,1,0), (1,1,1), (2,1,2)),
    ((0,0,-1,1,0,0,0,-1,0), (1,1,1), (1,2,2)),
    ((0,1,0,0,0,-1,-1,0,0), (1,1,1), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,0,-1,-1,0,0,0,-1,0), (1,1,1), (2,2,2)),
    ((0,-1,0,0,0,-1,-1,0,0), (1,1,1), (2,2,2)),
    ((0,1,0,0,0,1,-1,0,0), (1,0,1), (2,1,2)),
    ((0,0,-1,1,0,0,0,1,0), (1,1,0), (2,2,1)),
    ((0,1,0,0,0,-1,1,0,0), (0,1,1), (1,2,2)),
    ((0,0,1,1,0,0,0,-1,0), (1,0,1), (2,1,2)),
    ((0,0,1,-1,0,0,0,1,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,1,1,0,0), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
# Space group 207 (P 4 3 2): the 24 proper rotations of the cubic point
# group 432, each with zero translation.
for _elements in [
    (1,0,0,0,1,0,0,0,1),
    (1,0,0,0,0,-1,0,1,0),
    (1,0,0,0,0,1,0,-1,0),
    (0,0,1,0,1,0,-1,0,0),
    (0,0,-1,0,1,0,1,0,0),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (0,0,1,0,-1,0,1,0,0),
    (0,0,-1,0,-1,0,-1,0,0),
    (-1,0,0,0,0,1,0,1,0),
    (-1,0,0,0,0,-1,0,-1,0),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
transformations = []
# Space group 208 (P 42 3 2): the same 24 proper cubic rotations as
# P 4 3 2; the flag marks operations carrying the (1/2,1/2,1/2) screw
# translation, the others translate by zero.
for _elements, _half in [
    ((1,0,0,0,1,0,0,0,1), False),
    ((1,0,0,0,0,-1,0,1,0), True),
    ((1,0,0,0,0,1,0,-1,0), True),
    ((0,0,1,0,1,0,-1,0,0), True),
    ((0,0,-1,0,1,0,1,0,0), True),
    ((0,-1,0,1,0,0,0,0,1), True),
    ((0,1,0,-1,0,0,0,0,1), True),
    ((0,0,1,1,0,0,0,1,0), False),
    ((0,1,0,0,0,1,1,0,0), False),
    ((0,-1,0,0,0,-1,1,0,0), False),
    ((0,0,1,-1,0,0,0,-1,0), False),
    ((0,-1,0,0,0,1,-1,0,0), False),
    ((0,0,-1,-1,0,0,0,1,0), False),
    ((0,0,-1,1,0,0,0,-1,0), False),
    ((0,1,0,0,0,-1,-1,0,0), False),
    ((1,0,0,0,-1,0,0,0,-1), False),
    ((-1,0,0,0,1,0,0,0,-1), False),
    ((-1,0,0,0,-1,0,0,0,1), False),
    ((0,1,0,1,0,0,0,0,-1), True),
    ((0,-1,0,-1,0,0,0,0,-1), True),
    ((0,0,1,0,-1,0,1,0,0), True),
    ((0,0,-1,0,-1,0,-1,0,0), True),
    ((-1,0,0,0,0,1,0,1,0), True),
    ((-1,0,0,0,0,-1,0,-1,0), True),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    if _half:
        trans_num = N.array([1,1,1])
        trans_den = N.array([2,2,2])
    else:
        trans_num = N.array([0,0,0])
        trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
# Space group 209 (F 4 3 2): the 24 proper cubic rotations combined with
# each of the four face-centring translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0), giving 96 operations in the original order
# (all rotations for one centring before moving to the next).
_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (1,0,0,0,0,-1,0,1,0),
    (1,0,0,0,0,1,0,-1,0),
    (0,0,1,0,1,0,-1,0,0),
    (0,0,-1,0,1,0,1,0,0),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (0,0,1,0,-1,0,1,0,0),
    (0,0,-1,0,-1,0,-1,0,0),
    (-1,0,0,0,0,1,0,1,0),
    (-1,0,0,0,0,-1,0,-1,0),
]
for _num, _den in [
    ((0,0,0), (1,1,1)),
    ((0,1,1), (1,2,2)),
    ((1,0,1), (2,1,2)),
    ((1,1,0), (2,2,1)),
]:
    for _elements in _rotations:
        rot = N.array(_elements)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
# Space group 210 (F 41 3 2).
transformations = []
# The 24 proper rotations of cubic point group 432, in the table order
# used throughout this file.  The flag marks the operations that carry an
# extra (1/4, 1/4, 1/4) screw translation in this space group.
_rotations_210 = [
    ([1,0,0,0,1,0,0,0,1], False),
    ([1,0,0,0,0,-1,0,1,0], True),
    ([1,0,0,0,0,1,0,-1,0], True),
    ([0,0,1,0,1,0,-1,0,0], True),
    ([0,0,-1,0,1,0,1,0,0], True),
    ([0,-1,0,1,0,0,0,0,1], True),
    ([0,1,0,-1,0,0,0,0,1], True),
    ([0,0,1,1,0,0,0,1,0], False),
    ([0,1,0,0,0,1,1,0,0], False),
    ([0,-1,0,0,0,-1,1,0,0], False),
    ([0,0,1,-1,0,0,0,-1,0], False),
    ([0,-1,0,0,0,1,-1,0,0], False),
    ([0,0,-1,-1,0,0,0,1,0], False),
    ([0,0,-1,1,0,0,0,-1,0], False),
    ([0,1,0,0,0,-1,-1,0,0], False),
    ([1,0,0,0,-1,0,0,0,-1], False),
    ([-1,0,0,0,1,0,0,0,-1], False),
    ([-1,0,0,0,-1,0,0,0,1], False),
    ([0,1,0,1,0,0,0,0,-1], True),
    ([0,-1,0,-1,0,0,0,0,-1], True),
    ([0,0,1,0,-1,0,1,0,0], True),
    ([0,0,-1,0,-1,0,-1,0,0], True),
    ([-1,0,0,0,0,1,0,1,0], True),
    ([-1,0,0,0,0,-1,0,-1,0], True),
]
# One row per F-centring vector (0,0,0), (0,1/2,1/2), (1/2,0,1/2),
# (1/2,1/2,0): the (numerators, denominators) pair used by the plain
# rotations, followed by the pair used by the screw-type rotations.
_translations_210 = [
    (([0,0,0], [1,1,1]), ([1,1,1], [4,4,4])),
    (([0,1,1], [1,2,2]), ([1,3,3], [4,4,4])),
    (([1,0,1], [2,1,2]), ([3,1,3], [4,4,4])),
    (([1,1,0], [2,2,1]), ([3,3,1], [4,4,4])),
]
# Centring-major order, matching the original expanded listing.
for _plain_210, _screw_210 in _translations_210:
    for _elements_210, _is_screw_210 in _rotations_210:
        _num_210, _den_210 = _screw_210 if _is_screw_210 else _plain_210
        rot = N.array(_elements_210)
        rot.shape = (3, 3)
        trans_num = N.array(_num_210)
        trans_den = N.array(_den_210)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
# Space group 211 (I 4 3 2): the 24 proper rotations of point group 432,
# listed once with no translation and once with the body-centring
# translation (1/2, 1/2, 1/2).
transformations = []
_rotations_211 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
for _num_211, _den_211 in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _elements_211 in _rotations_211:
        rot = N.array(_elements_211)
        rot.shape = (3, 3)
        trans_num = N.array(_num_211)
        trans_den = N.array(_den_211)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
# Space group 212 (P 43 3 2).  Each entry below is (rotation-matrix
# elements in row-major order, translation numerators, translation
# denominators).
transformations = []
_operations_212 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [3,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [3,1,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [3,1,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,1], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [3,3,1], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [3,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [4,4,4]),
]
for _elements_212, _num_212, _den_212 in _operations_212:
    rot = N.array(_elements_212)
    rot.shape = (3, 3)
    trans_num = N.array(_num_212)
    trans_den = N.array(_den_212)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
# First part of the next space group's operation list (the remaining
# entries and the SpaceGroup registration follow on the lines below, which
# keep appending to this same `transformations` list).  Each entry is
# (rotation-matrix elements, translation numerators, translation
# denominators).
transformations = []
_operations_213_head = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,3], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [3,1,1], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [3,1,1], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,1,1], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,3], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [3,3,3], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [1,3,1], [4,4,4]),
]
for _elements_213, _num_213, _den_213 in _operations_213_head:
    rot = N.array(_elements_213)
    rot.shape = (3, 3)
    trans_num = N.array(_num_213)
    trans_den = N.array(_den_213)
    transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
# Space group 214 (I 41 3 2): 48 symmetry operations (the 24 operations of
# P 41 3 2 plus their body-centered (1/2,1/2,1/2) translates).  Each entry
# is (row-major rotation entries, translation numerators, denominators).
_ops_214 = [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0],    [1,1,3], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0],    [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0],    [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0],    [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1],    [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],    [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0],     [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],     [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],   [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],   [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0],   [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0],   [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],   [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0],   [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0],    [1,1,3], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0],  [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0],    [1,3,1], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0],  [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,1,0],    [3,3,5], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0],    [3,5,5], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0],    [3,5,5], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0],    [3,5,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1],    [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],    [3,3,5], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0],     [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0],     [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0],   [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],   [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],   [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],   [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],   [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],   [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1],    [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1],  [3,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0],    [3,3,5], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0],  [3,3,3], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0],    [3,5,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0],  [3,3,3], [4,4,4]),
]
transformations = []
for _r, _tn, _td in _ops_214:
    rot = N.array(_r)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_tn), N.array(_td)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
# Space group 215 (P -4 3 m): 24 point-group operations, all with zero
# translation.  Rotation matrices are given as row-major 3x3 entry lists.
_rots_215 = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
transformations = []
for _r in _rots_215:
    rot = N.array(_r)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
# Space group 216 (F -4 3 m): 96 symmetry operations, formed by combining
# the 24 point-group operations of -4 3 m with the four face-centering
# translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).
# Rotation matrices are row-major 3x3 entry lists; translations are given
# as (numerators, denominators).
_rots_216 = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
_centerings_216 = [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]
transformations = []
# Outer loop over centerings, inner over rotations, matching the order in
# which the original generated code appended the 96 operations.
for _tn, _td in _centerings_216:
    for _r in _rots_216:
        rot = N.array(_r)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_tn), N.array(_td)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
# Space group 217, 'I -4 3 m': body-centred cubic, point group -4 3 m.
# The 24 point-group rotation matrices, written row-major as 9 integers.
_rots = [
    (1, 0, 0,   0, 1, 0,   0, 0, 1),
    (-1, 0, 0,  0, 0, 1,   0, -1, 0),
    (-1, 0, 0,  0, 0, -1,  0, 1, 0),
    (0, 0, -1,  0, -1, 0,  1, 0, 0),
    (0, 0, 1,   0, -1, 0,  -1, 0, 0),
    (0, 1, 0,   -1, 0, 0,  0, 0, -1),
    (0, -1, 0,  1, 0, 0,   0, 0, -1),
    (0, 0, 1,   1, 0, 0,   0, 1, 0),
    (0, 1, 0,   0, 0, 1,   1, 0, 0),
    (0, -1, 0,  0, 0, -1,  1, 0, 0),
    (0, 0, 1,   -1, 0, 0,  0, -1, 0),
    (0, -1, 0,  0, 0, 1,   -1, 0, 0),
    (0, 0, -1,  -1, 0, 0,  0, 1, 0),
    (0, 0, -1,  1, 0, 0,   0, -1, 0),
    (0, 1, 0,   0, 0, -1,  -1, 0, 0),
    (1, 0, 0,   0, -1, 0,  0, 0, -1),
    (-1, 0, 0,  0, 1, 0,   0, 0, -1),
    (-1, 0, 0,  0, -1, 0,  0, 0, 1),
    (0, -1, 0,  -1, 0, 0,  0, 0, 1),
    (0, 1, 0,   1, 0, 0,   0, 0, 1),
    (0, 0, -1,  0, 1, 0,   -1, 0, 0),
    (0, 0, 1,   0, 1, 0,   1, 0, 0),
    (1, 0, 0,   0, 0, -1,  0, -1, 0),
    (1, 0, 0,   0, 0, 1,   0, 1, 0),
]
transformations = []
# Each rotation appears twice: once with the zero translation and once with
# the I-centring shift (1/2, 1/2, 1/2), stored as numerator/denominator triples.
for _num, _den in (((0, 0, 0), (1, 1, 1)), ((1, 1, 1), (2, 2, 2))):
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
# Space group 218, 'P -4 3 n': primitive cubic, point group -4 3 m with n-glides.
# The 24 point-group rotation matrices, written row-major as 9 integers.
_rots = [
    (1, 0, 0,   0, 1, 0,   0, 0, 1),
    (-1, 0, 0,  0, 0, 1,   0, -1, 0),
    (-1, 0, 0,  0, 0, -1,  0, 1, 0),
    (0, 0, -1,  0, -1, 0,  1, 0, 0),
    (0, 0, 1,   0, -1, 0,  -1, 0, 0),
    (0, 1, 0,   -1, 0, 0,  0, 0, -1),
    (0, -1, 0,  1, 0, 0,   0, 0, -1),
    (0, 0, 1,   1, 0, 0,   0, 1, 0),
    (0, 1, 0,   0, 0, 1,   1, 0, 0),
    (0, -1, 0,  0, 0, -1,  1, 0, 0),
    (0, 0, 1,   -1, 0, 0,  0, -1, 0),
    (0, -1, 0,  0, 0, 1,   -1, 0, 0),
    (0, 0, -1,  -1, 0, 0,  0, 1, 0),
    (0, 0, -1,  1, 0, 0,   0, -1, 0),
    (0, 1, 0,   0, 0, -1,  -1, 0, 0),
    (1, 0, 0,   0, -1, 0,  0, 0, -1),
    (-1, 0, 0,  0, 1, 0,   0, 0, -1),
    (-1, 0, 0,  0, -1, 0,  0, 0, 1),
    (0, -1, 0,  -1, 0, 0,  0, 0, 1),
    (0, 1, 0,   1, 0, 0,   0, 0, 1),
    (0, 0, -1,  0, 1, 0,   -1, 0, 0),
    (0, 0, 1,   0, 1, 0,   1, 0, 0),
    (1, 0, 0,   0, 0, -1,  0, -1, 0),
    (1, 0, 0,   0, 0, 1,   0, 1, 0),
]
# 0-based indices of the rotations that carry the glide translation
# (1/2, 1/2, 1/2); the rest get the zero translation.
_glide = (1, 2, 3, 4, 5, 6, 18, 19, 20, 21, 22, 23)
transformations = []
for _i in range(len(_rots)):
    rot = N.array(_rots[_i])
    rot.shape = (3, 3)
    if _i in _glide:
        _num, _den = (1, 1, 1), (2, 2, 2)
    else:
        _num, _den = (0, 0, 0), (1, 1, 1)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
# Space group 219, 'F -4 3 c': face-centred cubic, point group -4 3 m with c-glides.
# The 24 point-group rotation matrices, written row-major as 9 integers.
_rots = [
    (1, 0, 0,   0, 1, 0,   0, 0, 1),
    (-1, 0, 0,  0, 0, 1,   0, -1, 0),
    (-1, 0, 0,  0, 0, -1,  0, 1, 0),
    (0, 0, -1,  0, -1, 0,  1, 0, 0),
    (0, 0, 1,   0, -1, 0,  -1, 0, 0),
    (0, 1, 0,   -1, 0, 0,  0, 0, -1),
    (0, -1, 0,  1, 0, 0,   0, 0, -1),
    (0, 0, 1,   1, 0, 0,   0, 1, 0),
    (0, 1, 0,   0, 0, 1,   1, 0, 0),
    (0, -1, 0,  0, 0, -1,  1, 0, 0),
    (0, 0, 1,   -1, 0, 0,  0, -1, 0),
    (0, -1, 0,  0, 0, 1,   -1, 0, 0),
    (0, 0, -1,  -1, 0, 0,  0, 1, 0),
    (0, 0, -1,  1, 0, 0,   0, -1, 0),
    (0, 1, 0,   0, 0, -1,  -1, 0, 0),
    (1, 0, 0,   0, -1, 0,  0, 0, -1),
    (-1, 0, 0,  0, 1, 0,   0, 0, -1),
    (-1, 0, 0,  0, -1, 0,  0, 0, 1),
    (0, -1, 0,  -1, 0, 0,  0, 0, 1),
    (0, 1, 0,   1, 0, 0,   0, 0, 1),
    (0, 0, -1,  0, 1, 0,   -1, 0, 0),
    (0, 0, 1,   0, 1, 0,   1, 0, 0),
    (1, 0, 0,   0, 0, -1,  0, -1, 0),
    (1, 0, 0,   0, 0, 1,   0, 1, 0),
]
# 0-based indices of the rotations that carry the glide component.
_glide = (1, 2, 3, 4, 5, 6, 18, 19, 20, 21, 22, 23)
# One entry per F-centring shift, in the original order (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2), (1/2,1/2,0).  Each entry holds the (numerator, denominator)
# translation for the plain rotations and for the glide rotations, with the
# fractions kept exactly as the generated table stores them (not reduced mod 1).
_shifts = [
    (((0, 0, 0), (1, 1, 1)), ((1, 0, 0), (2, 1, 1))),
    (((0, 1, 1), (1, 2, 2)), ((1, 1, 1), (2, 2, 2))),
    (((1, 0, 1), (2, 1, 2)), ((1, 0, 1), (1, 1, 2))),
    (((1, 1, 0), (2, 2, 1)), ((1, 1, 0), (1, 2, 1))),
]
transformations = []
for _plain, _glide_t in _shifts:
    for _i in range(len(_rots)):
        rot = N.array(_rots[_i])
        rot.shape = (3, 3)
        if _i in _glide:
            _num, _den = _glide_t
        else:
            _num, _den = _plain
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
# --- Space group 220, 'I -4 3 d' (continued) ---
# Generated table of Seitz symmetry operations. Each operation is stored as a
# 3x3 integer rotation matrix plus a fractional translation represented by two
# integer vectors: component-wise numerators (trans_num) over denominators
# (trans_den).
# NOTE(review): the first append below completes an operation whose rotation
# matrix (`rot`) was assigned just above this chunk — verify against the
# preceding lines of the file.
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
# Register the completed group under both its International Tables number and
# its Hermann-Mauguin symbol.
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
transformations = []
# Space group 221 ('P m -3 m') is symmorphic in this table: every one of the
# 48 operations below carries a zero fractional translation, so only the
# rotation matrices (flattened row-major) need to be enumerated.
for flat_rot in [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, 0, -1, 0, 1, 0),
    (1, 0, 0, 0, 0, 1, 0, -1, 0),
    (0, 0, 1, 0, 1, 0, -1, 0, 0),
    (0, 0, -1, 0, 1, 0, 1, 0, 0),
    (0, -1, 0, 1, 0, 0, 0, 0, 1),
    (0, 1, 0, -1, 0, 0, 0, 0, 1),
    (0, 0, 1, 1, 0, 0, 0, 1, 0),
    (0, 1, 0, 0, 0, 1, 1, 0, 0),
    (0, -1, 0, 0, 0, -1, 1, 0, 0),
    (0, 0, 1, -1, 0, 0, 0, -1, 0),
    (0, -1, 0, 0, 0, 1, -1, 0, 0),
    (0, 0, -1, -1, 0, 0, 0, 1, 0),
    (0, 0, -1, 1, 0, 0, 0, -1, 0),
    (0, 1, 0, 0, 0, -1, -1, 0, 0),
    (1, 0, 0, 0, -1, 0, 0, 0, -1),
    (-1, 0, 0, 0, 1, 0, 0, 0, -1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, -1),
    (0, -1, 0, -1, 0, 0, 0, 0, -1),
    (0, 0, 1, 0, -1, 0, 1, 0, 0),
    (0, 0, -1, 0, -1, 0, -1, 0, 0),
    (-1, 0, 0, 0, 0, 1, 0, 1, 0),
    (-1, 0, 0, 0, 0, -1, 0, -1, 0),
    (-1, 0, 0, 0, -1, 0, 0, 0, -1),
    (-1, 0, 0, 0, 0, 1, 0, -1, 0),
    (-1, 0, 0, 0, 0, -1, 0, 1, 0),
    (0, 0, -1, 0, -1, 0, 1, 0, 0),
    (0, 0, 1, 0, -1, 0, -1, 0, 0),
    (0, 1, 0, -1, 0, 0, 0, 0, -1),
    (0, -1, 0, 1, 0, 0, 0, 0, -1),
    (0, 0, -1, -1, 0, 0, 0, -1, 0),
    (0, -1, 0, 0, 0, -1, -1, 0, 0),
    (0, 1, 0, 0, 0, 1, -1, 0, 0),
    (0, 0, -1, 1, 0, 0, 0, 1, 0),
    (0, 1, 0, 0, 0, -1, 1, 0, 0),
    (0, 0, 1, 1, 0, 0, 0, -1, 0),
    (0, 0, 1, -1, 0, 0, 0, 1, 0),
    (0, -1, 0, 0, 0, 1, 1, 0, 0),
    (-1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, -1, 0, 0, 0, 1),
    (1, 0, 0, 0, 1, 0, 0, 0, -1),
    (0, -1, 0, -1, 0, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, 1),
    (0, 0, -1, 0, 1, 0, -1, 0, 0),
    (0, 0, 1, 0, 1, 0, 1, 0, 0),
    (1, 0, 0, 0, 0, -1, 0, -1, 0),
    (1, 0, 0, 0, 0, 1, 0, 1, 0),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
# Register under both the International Tables number and the symbol.
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
transformations = []
# Space group 222 ('P n -3 n', origin choice 2). Each row below pairs a 3x3
# rotation matrix (flattened row-major) with its fractional translation,
# given as component-wise numerators over denominators.
for flat_rot, num, den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, 0, -1, 0, 1, 0), (0, 1, 0), (1, 2, 1)),
    ((1, 0, 0, 0, 0, 1, 0, -1, 0), (0, 0, 1), (1, 1, 2)),
    ((0, 0, 1, 0, 1, 0, -1, 0, 0), (0, 0, 1), (1, 1, 2)),
    ((0, 0, -1, 0, 1, 0, 1, 0, 0), (1, 0, 0), (2, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 0, 0), (2, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (0, 1, 0), (1, 2, 1)),
    ((0, 0, 1, 1, 0, 0, 0, 1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, 1, 1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, -1, 1, 0, 0), (1, 1, 0), (2, 2, 1)),
    ((0, 0, 1, -1, 0, 0, 0, -1, 0), (0, 1, 1), (1, 2, 2)),
    ((0, -1, 0, 0, 0, 1, -1, 0, 0), (1, 0, 1), (2, 1, 2)),
    ((0, 0, -1, -1, 0, 0, 0, 1, 0), (1, 1, 0), (2, 2, 1)),
    ((0, 0, -1, 1, 0, 0, 0, -1, 0), (1, 0, 1), (2, 1, 2)),
    ((0, 1, 0, 0, 0, -1, -1, 0, 0), (0, 1, 1), (1, 2, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 0, 1), (2, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((0, 0, 1, 0, -1, 0, 1, 0, 0), (0, 1, 0), (1, 2, 1)),
    ((0, 0, -1, 0, -1, 0, -1, 0, 0), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 0, 1, 0, 1, 0), (1, 0, 0), (2, 1, 1)),
    ((-1, 0, 0, 0, 0, -1, 0, -1, 0), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 0, 1, 0, -1, 0), (0, -1, 0), (1, 2, 1)),
    ((-1, 0, 0, 0, 0, -1, 0, 1, 0), (0, 0, -1), (1, 1, 2)),
    ((0, 0, -1, 0, -1, 0, 1, 0, 0), (0, 0, -1), (1, 1, 2)),
    ((0, 0, 1, 0, -1, 0, -1, 0, 0), (-1, 0, 0), (2, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (-1, 0, 0), (2, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, -1, 0), (1, 2, 1)),
    ((0, 0, -1, -1, 0, 0, 0, -1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, -1, -1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, 1, -1, 0, 0), (-1, -1, 0), (2, 2, 1)),
    ((0, 0, -1, 1, 0, 0, 0, 1, 0), (0, -1, -1), (1, 2, 2)),
    ((0, 1, 0, 0, 0, -1, 1, 0, 0), (-1, 0, -1), (2, 1, 2)),
    ((0, 0, 1, 1, 0, 0, 0, -1, 0), (-1, -1, 0), (2, 2, 1)),
    ((0, 0, 1, -1, 0, 0, 0, 1, 0), (-1, 0, -1), (2, 1, 2)),
    ((0, -1, 0, 0, 0, 1, 1, 0, 0), (0, -1, -1), (1, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, -1, -1), (1, 2, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (-1, 0, -1), (2, 1, 2)),
    ((1, 0, 0, 0, 1, 0, 0, 0, -1), (-1, -1, 0), (2, 2, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (0, 0, -1), (1, 1, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (-1, -1, -1), (2, 2, 2)),
    ((0, 0, -1, 0, 1, 0, -1, 0, 0), (0, -1, 0), (1, 2, 1)),
    ((0, 0, 1, 0, 1, 0, 1, 0, 0), (-1, -1, -1), (2, 2, 2)),
    ((1, 0, 0, 0, 0, -1, 0, -1, 0), (-1, 0, 0), (2, 1, 1)),
    ((1, 0, 0, 0, 0, 1, 0, 1, 0), (-1, -1, -1), (2, 2, 2)),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the International Tables number and the symbol.
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
transformations = []
# Space group 223 ('P m -3 n'). Each row below pairs a 3x3 rotation matrix
# (flattened row-major) with its fractional translation, given as
# component-wise numerators over denominators.
for flat_rot, num, den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, 0, -1, 0, 1, 0), (1, 1, 1), (2, 2, 2)),
    ((1, 0, 0, 0, 0, 1, 0, -1, 0), (1, 1, 1), (2, 2, 2)),
    ((0, 0, 1, 0, 1, 0, -1, 0, 0), (1, 1, 1), (2, 2, 2)),
    ((0, 0, -1, 0, 1, 0, 1, 0, 0), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 0, 1, 1, 0, 0, 0, 1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, 1, 1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, -1, 1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 0, 1, -1, 0, 0, 0, -1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, 1, -1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 0, -1, -1, 0, 0, 0, 1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 0, -1, 1, 0, 0, 0, -1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, -1, -1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((0, 0, 1, 0, -1, 0, 1, 0, 0), (1, 1, 1), (2, 2, 2)),
    ((0, 0, -1, 0, -1, 0, -1, 0, 0), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 0, 1, 0, 1, 0), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 0, -1, 0, -1, 0), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 0, 1, 0, -1, 0), (-1, -1, -1), (2, 2, 2)),
    ((-1, 0, 0, 0, 0, -1, 0, 1, 0), (-1, -1, -1), (2, 2, 2)),
    ((0, 0, -1, 0, -1, 0, 1, 0, 0), (-1, -1, -1), (2, 2, 2)),
    ((0, 0, 1, 0, -1, 0, -1, 0, 0), (-1, -1, -1), (2, 2, 2)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (-1, -1, -1), (2, 2, 2)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (-1, -1, -1), (2, 2, 2)),
    ((0, 0, -1, -1, 0, 0, 0, -1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, -1, -1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, 1, -1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 0, -1, 1, 0, 0, 0, 1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, -1, 1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 0, 1, 1, 0, 0, 0, -1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 0, 1, -1, 0, 0, 0, 1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, 1, 1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (-1, -1, -1), (2, 2, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (-1, -1, -1), (2, 2, 2)),
    ((0, 0, -1, 0, 1, 0, -1, 0, 0), (-1, -1, -1), (2, 2, 2)),
    ((0, 0, 1, 0, 1, 0, 1, 0, 0), (-1, -1, -1), (2, 2, 2)),
    ((1, 0, 0, 0, 0, -1, 0, -1, 0), (-1, -1, -1), (2, 2, 2)),
    ((1, 0, 0, 0, 0, 1, 0, 1, 0), (-1, -1, -1), (2, 2, 2)),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the International Tables number and the symbol.
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
# --- Symmetry operations for space group 224 ('P n -3 m :2', registered
# --- just below) — auto-generated data, continued from above this excerpt.
# Each 5-line group encodes one symmetry operation:
#   rot       : 3x3 integer rotation matrix (9 ints, reshaped in place)
#   trans_num : numerators of the fractional translation vector
#   trans_den : denominators of the fractional translation vector
# NOTE(review): presumably applied as x -> rot.x + trans_num/trans_den —
# confirm against the SpaceGroup implementation.
# NOTE(review): `transformations` and the `rot` consumed by the first
# append below are initialised above this excerpt (same generated
# pattern) — confirm against the full file.
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# Build space group No. 224 from the operation list assembled above and
# register it under both its number and its Hermann-Mauguin symbol, so
# lookups by either key return the same SpaceGroup instance.
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
for key in (224, 'P n -3 m :2'):
    space_groups[key] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# Register space group No. 225 (Hermann-Mauguin symbol 'F m -3 m') built from
# the transformation list accumulated above; each entry is a
# (rotation matrix, translation numerator, translation denominator) triple.
# The group is looked up both by its integer number and by its symbol string.
sg = SpaceGroup(225, 'F m -3 m', transformations)
space_groups[225] = sg
space_groups['F m -3 m'] = sg
# Begin the transformation list for the next space group (its SpaceGroup(...)
# registration lies beyond this chunk; presumably group No. 226 given the
# sequence — TODO confirm against the rest of the file).
transformations = []
# Identity operation: rotation = 3x3 identity, translation = 0/1 in each axis.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(226, 'F m -3 c', transformations)
space_groups[226] = sg
space_groups['F m -3 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = | N.array([1,4,4]) | numpy.array |
# -*- coding:utf-8 -*-
from __future__ import division, absolute_import
from simple_ml.base.base_model import *
from simple_ml.base.base_enum import *
from simple_ml.base.base_error import *
import numpy as np
from collections import Counter
from simple_ml.evaluation import *
__all__ = [
'ID3',
'CART'
]
class ID3(BaseClassifier):

    __doc__ = "ID3 Decision Tree"

    def __init__(self, max_depth=None, min_samples_leaf=3):
        """
        ID3 decision tree classifier.

        :param max_depth: maximum tree depth; None means unbounded
        :param min_samples_leaf: maximum number of samples in a leaf node
               (an odd number is preferable so majority voting cannot tie)
        """
        super(ID3, self).__init__()
        # np.inf lets the depth test in _gen_tree work without a None check
        self.max_depth = max_depth if max_depth is not None else np.inf
        self.min_samples_leaf = min_samples_leaf
        self._root = None

    @property
    def root(self):
        # Root node of the fitted tree (None before fit)
        return self._root

    def fit(self, x, y):
        """
        Grow the tree on the training set (x, y).

        ID3 supports discrete labels and discrete features only.

        :raises LabelTypeError: if labels are not binary/multi-class
        :raises FeatureTypeError: if any feature is continuous
        """
        super(ID3, self).fit(x, y)
        if self.label_type != LabelType.binary and self.label_type != LabelType.multi_class:
            raise LabelTypeError("ID3算法只支持离散标签")
        if LabelType.continuous in self.feature_type:
            raise FeatureTypeError("ID3算法只支持离散特征")
        self._root = self._gen_tree(MultiTreeNode(data_id=np.arange(self.x.shape[0])), 0)

    def _gen_tree(self, node, depth):
        """Recursively split `node`; return the (sub)tree root."""
        if depth >= self.max_depth or len(node.data_id) <= self.min_samples_leaf:
            # depth/size limit reached: label the leaf by majority vote
            # (assumes labels are non-negative integers, as bincount requires)
            node.leaf_label = np.argmax(np.bincount(self.y[node.data_id]))
            return node
        split_feature = self._get_best_split(node.data_id)
        if split_feature is None:
            # ROBUSTNESS FIX: every remaining feature is constant, so no split
            # is possible; the original indexed with None and misbehaved here.
            node.leaf_label = np.argmax(np.bincount(self.y[node.data_id]))
            return node
        feature = self.x[node.data_id, split_feature]
        nodes = []
        for value in np.unique(feature):
            # one child per distinct value of the chosen feature
            new_node = MultiTreeNode(None, node.data_id[feature == value], split_feature, value)
            new_node = self._gen_tree(new_node, depth + 1)
            nodes.append(new_node)
        node.child = nodes
        return node

    def _get_best_split(self, data_id):
        """
        Return the feature index with maximum information gain over the given
        samples, or None when every feature has at most one distinct value.
        """
        data = self.x[data_id]
        y = self.y[data_id]
        best_split_feature = None
        y_entropy = self._get_entropy(y)
        _max_gain = -np.inf
        for i in range(data.shape[1]):
            unique = np.unique(data[:, i])
            if len(unique) <= 1:
                # a constant feature cannot split the node
                continue
            entropy = 0
            for feature_value in unique:
                y_temp = y[data[:, i] == feature_value]
                # weighted conditional entropy H(Y | feature i)
                entropy += len(y_temp) / len(data_id) * self._get_entropy(y_temp)
            gain = y_entropy - entropy
            if gain > _max_gain:
                _max_gain = gain
                best_split_feature = i
        return best_split_feature

    @staticmethod
    def _get_entropy(arr):
        """Shannon entropy (natural log) of a label array."""
        count = Counter(arr)
        s = 0
        for i in count:
            p = count[i] / len(arr)
            s += -p * np.log(p)
        return s

    def predict(self, x):
        """Predict one label per row of x; raises if the model is not fitted."""
        if self._root is None:
            raise ModelNotFittedError
        super(ID3, self).predict(x)
        return np.array([self._predict_single(i, self._root) for i in x])

    def _predict_single(self, x, node):
        """Route one sample down the tree to a leaf label."""
        if node.leaf_label is not None:
            return node.leaf_label
        for child_node in node.child:
            feature_id = child_node.feature_id
            value = child_node.value
            if x[feature_id] == value:
                return self._predict_single(x, child_node)
        # BUG FIX: the sample's feature value was never seen during training.
        # The original returned np.random.choice(self.y, 1) — a length-1 array —
        # which made predict() build a ragged array; return a scalar instead.
        return np.random.choice(self.y)

    def score(self, x, y):
        """F1 score of the prediction on (x, y)."""
        super(ID3, self).score(x, y)
        y_predict = self.predict(x)
        return classify_f1(y_predict, y)

    def classify_plot(self, x, y, title=""):
        """Plot decision regions of a freshly trained copy against (x, y)."""
        classify_plot(self.new(), self.x, self.y, x, y, title=self.__doc__ + title)

    def new(self):
        """Return an unfitted copy with the same hyper-parameters."""
        return ID3(self.max_depth, self.min_samples_leaf)
class CART(BaseClassifier):
__doc__ = "Classify and Regression Tree"
def __init__(self, max_depth=10, min_samples_leaf=5):
"""
分类回归树
:param max_depth: 树最大深度
:param min_samples_leaf: 叶子节点最大样本数(最好是奇数,用以投票)
"""
super(CART, self).__init__()
self._function = Function.cls_and_reg
self.max_depth = max_depth if max_depth is not None else np.inf
self.min_samples_leaf = min_samples_leaf
self._root = None
@property
def root(self):
return self._root
def fit(self, x, y):
super(CART, self).fit(x, y)
self._root = self._gen_tree(BinaryTreeNode(None, None, | np.arange(self.x.shape[0]) | numpy.arange |
__author__ = 'yzhu'
__version__ = '0.1'
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import os
from pycocotools.coco import COCO
from pycocotools import mask
import PIL.ImageDraw as ImageDraw
import PIL.Image as Image
import cv2
class Amodal(COCO):
def __init__(self, annotation_file=None, verbose=True):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
COCO.__init__(self, annotation_file)
self.verbose = verbose
def createIndex(self):
# create index
print('creating index...')
anns = {}
imgToAnns = {}
imgs = {}
regions = []
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
for region in ann['regions']:
region['image_id'] = ann['image_id']
regions.append(region)
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.imgs = imgs
self.regions = regions
def getAmodalAnnIds(self, imgIds=[]):
"""
Get amodal ann id that satisfy given fiter conditions.
:param imgIds (int array): get anns for given imgs
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
if len(imgIds) == 0:
anns = self.dataset['annotations']
else:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
ids = [ann['id'] for ann in anns]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
    def showAmodalAnns(self, anns):
        """
        Display a set of amodal regions on the current matplotlib axis.

        :param anns: a single annotation dict with a 'regions' list; each
            region carries 'segmentation' (polygon coord list or RLE dict)
        :return: None (0 on invalid input)
        """
        if type(anns) == list:
            print("anns cannot be a list! Should be a dict.")
            return 0
        ax = plt.gca()
        polygons = []
        lines = []  # NOTE(review): never used
        color = []
        # draw back-to-front so the front-most region ends up on top
        for ann in reversed(anns['regions']):
            # random display color per region
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                seg = ann['segmentation']
                poly = np.array(seg).reshape((len(seg)//2, 2))
                polygons.append(Polygon(poly, True, alpha=0.2))
                color.append(c)
                # NOTE(review): a new PatchCollection containing ALL polygons
                # collected so far is added every iteration, so earlier
                # polygons get drawn repeatedly (alpha build-up) — presumably
                # unintentional; confirm before relying on the rendering.
                p = PatchCollection(polygons, facecolors=color, edgecolors=(1,1,1,1), linewidths=3, alpha=0.2)
                ax.add_collection(p)
                #color.append(c)
            else:
                # RLE mask: paint mask and boundary directly
                self.showMask(ann['segmentation'], ax,c)
                #raise NotImplementedError
    def showEdgeMap(self, anns):
        """
        Show the edge map for an annotation: polygon regions are drawn with
        no face colors (edges only) on the current matplotlib axis.
        :param anns: a single annotation dict with a 'regions' list
        :return: None (0 on invalid input)
        """
        if type(anns) == list:
            print("anns cannot be a list! Should be a dict")
            return 0
        ax = plt.gca()
        polygons = []
        lines = []  # NOTE(review): never used
        color = []  # NOTE(review): stays empty -> facecolors=[] below, edges only
        for ann in reversed(anns['regions']):
            c = np.zeros([1, 3]).tolist()[0]  # NOTE(review): computed but unused here
            if type(ann['segmentation']) == list:
                # polygon
                seg = ann['segmentation']
                poly = np.array(seg).reshape((len(seg)//2, 2))
                polygons.append(Polygon(poly, True, alpha=0.2))
                # NOTE(review): as in showAmodalAnns, a collection with all
                # polygons so far is added each iteration (repeated draws).
                p = PatchCollection(polygons, facecolors=color, edgecolors=(1,1,1,1), linewidths=1, alpha=1)
                ax.add_collection(p)
                #color.append(c)
            else:
                # RLE mask: default color (green mask + white boundary)
                self.showMask(ann['segmentation'], ax)
def getMask(self, M):
m = mask.decode([M])
img = np.ones( (m.shape[0], m.shape[1], 3) )
# get boundary quickly
kernel_size = m.shape[0]//40
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
dilation = cv2.dilate(m, kernel)
B = dilation[:,:,None] - m
# B = np.zeros( (m.shape[0], m.shape[1]) )
# for aa in range(m.shape[0]-1):
# for bb in range(m.shape[1]-1):
# #kk = aa*m.shape[1]+bb
# if m[aa,bb] != m[aa,bb+1]:
# B[aa,bb], B[aa,bb+1] = 1,1
# if m[aa,bb] != m[aa+1,bb]:
# B[aa,bb], B[aa+1,bb] = 1,1
# if m[aa,bb] != m[aa+1,bb+1]:
# B[aa,bb], B[aa+1,bb+1] = 1,1
return m,B
def showMask(self, M, ax, c = [0, 1, 0]):
m,B = self.getMask(M)
img = np.ones( (m.shape[0], m.shape[1], 3) )
for i in range(3):
img[:, :, i] = c[i];ax.imshow(np.dstack( (img, m*0.5) ))
img[:, :, i] = 1;ax.imshow(np.dstack( (img, B*1) ))
def getAnnMask(self,ann,w,h,fill_color=255):
if type(ann['segmentation']) == list:
# polygon
seg = ann['segmentation']
img = Image.new("L", (w, h))
draw = ImageDraw.Draw(img)
draw.polygon(seg, fill= int(fill_color))
all_mask = np.asarray( img, dtype="uint8" )
else:
all_mask,B = self.getMask(ann['segmentation'])
all_mask = np.squeeze(all_mask)
if 'invisible_mask' in ann:
invisible_mask,boundary = self.getMask(ann['invisible_mask'])
invisible_mask[invisible_mask>0] = fill_color
invisible_mask = np.squeeze(invisible_mask)
invisible_mask = np.squeeze(invisible_mask)
return all_mask,invisible_mask.astype('uint8')
else:
return all_mask,[]
def getAnnMask2(self,ann,w,h,fill_color=255):
if type(ann['segmentation']) == list:
# polygon
seg = ann['segmentation']
img = Image.new("L", (w, h))
draw = ImageDraw.Draw(img)
draw.polygon(seg, fill= int(fill_color))
all_mask = np.asarray( img, dtype="uint8" )
else:
all_mask,B = self.getMask(ann['segmentation'])
all_mask = np.squeeze(all_mask)
if 'visible_mask' in ann:
visible_mask,boundary = self.getMask(ann['visible_mask'])
visible_mask[visible_mask>0] = fill_color
visible_mask = np.squeeze(visible_mask)
visible_mask = np.squeeze(visible_mask)
return all_mask,visible_mask.astype('uint8')
else:
return all_mask,[]
def getAmodalInstance(self,anns,w,h,k=-1):
"""
return k-th visualable/unvisualable mask
k: the depth order of anns, 1-index. If k = -1, just visulize mask
"""
fill_color = 255
if type(anns) == list:
print("ann cannot be a list! Should be a dict")
return 0
if k < 0:
layer_visible_mask = np.ones((h,w))*255
for ann in anns['regions']:
all_mask,invisible_mask,_ = self.getAnnMask(ann,w,h,fill_color)
if type(invisible_mask)==list:
layer_visible_mask += all_mask
else:
layer_visible_mask += all_mask-invisible_mask
return layer_visible_mask.astype('uint8')
else:
ann = anns['regions'][k]
return self.getAnnMask(ann,w,h,fill_color)
def showAmodalInstance(self, anns, k=-1):
"""
Display k-th instance only: print segmentation first, then print invisible_mask
anns: a single annotation
k: the depth order of anns, 1-index. If k = -1, just visulize input
"""
ax = plt.gca()
c = | np.random.random((1,3)) | numpy.random.random |
import tensorflow as tf
import numpy as np
import time
import datetime
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
class VAE(object):
    def __init__(self):
        """
        Build the complete training setup: load data, construct the input
        pipeline and VAE graph, loss and optimizer, then create the session,
        summary writers, and initialize variables and dataset iterators.
        """
        # VAE parameters
        self.z_dim = 10  # latent dimensionality
        # Iterations parameters
        self.max_it = 300000
        self.stat_every = 500    # log train/test stats every N iterations
        self.saving_every = 1e8  # effectively: only checkpoint at the end
        # Directories (a fresh timestamped folder per run)
        date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        model_name = 'vae_dsprites'
        self.data_path = 'database/'
        self.model_path = 'results/' + model_name + '_' + date + '/'
        self.checkpoint_path = self.model_path + 'checkpoints/model'
        self.tb_path = self.model_path + 'tb_summaries/'
        # Data
        self.data_file = self.data_path + 'dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'
        self.data_train, self.data_test, self.all_imgs, self.all_factors, self.n_classes = self._data_init()
        self.iterator, self.handle, self.train_img_ph, self.iterator_train, self.test_img_ph, self.iterator_test =\
            self._iterator_init(batch_size=64)
        # Model setup: the VAE graph reads directly from the feedable iterator
        self.input_vae, self.enc_mean, self.enc_logvar, self.z_sample, self.dec_logit, self.dec_sigm, \
            self.dec_mean_logit, self.dec_mean_sigm = self._vae_init(inputs=self.iterator.get_next())
        self.vae_loss, self.recon_loss = self._loss_init()
        self.vae_train_step = self._optimizer_init()
        # Savers (checkpoints + TensorBoard summaries)
        self.sess = tf.Session()
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
        self.train_writer = tf.summary.FileWriter(self.tb_path + 'train', self.sess.graph)
        self.test_writer = tf.summary.FileWriter(self.tb_path + 'test')
        tf.summary.scalar('vae_loss', self.vae_loss)
        tf.summary.scalar('recon_loss', self.recon_loss)
        # Initialization of variables
        self.sess.run(tf.global_variables_initializer())
        # Initialization of iterators (datasets are fed through placeholders)
        self.sess.run([self.iterator_train.initializer, self.iterator_test.initializer],
                      feed_dict={self.train_img_ph: self.data_train, self.test_img_ph: self.data_test})
        # Initialization of handles: select the active split at run time
        self.train_handle = self.sess.run(self.iterator_train.string_handle())
        self.test_handle = self.sess.run(self.iterator_test.string_handle())
def train(self, final_evaluation=False):
start_time = time.time()
merged = tf.summary.merge_all()
print("Beginning training")
print("Beginning training", file=open(self.model_path + 'train.log', 'w'))
it = 0
while it < self.max_it:
it += 1
self.sess.run(self.vae_train_step, feed_dict={self.handle: self.train_handle})
if it % self.stat_every == 0:
# Train evaluation
vae_loss, recon_loss, summ = self.sess.run([self.vae_loss, self.recon_loss, merged],
feed_dict={self.handle: self.train_handle})
print("Iteration %i (train):\n VAE loss %f - Recon loss %f" % (it, vae_loss, recon_loss), flush=True)
print("Iteration %i (train):\n VAE loss %f - Recon loss %f" % (it, vae_loss, recon_loss),
flush=True, file=open(self.model_path + 'train.log', 'a'))
self.train_writer.add_summary(summ, it)
# Test evaluation
vae_loss, recon_loss, summ = self.sess.run([self.vae_loss, self.recon_loss, merged],
feed_dict={self.handle: self.test_handle})
print("Iteration %i (test):\n VAE loss %f - Recon loss %f" % (it, vae_loss, recon_loss), flush=True)
print("Iteration %i (test):\n VAE loss %f - Recon loss %f" % (it, vae_loss, recon_loss),
flush=True, file=open(self.model_path + 'train.log', 'a'))
self.test_writer.add_summary(summ, it)
time_usage = str(datetime.timedelta(seconds=int(round(time.time() - start_time))))
print("Time usage: " + time_usage)
print("Time usage: " + time_usage, file=open(self.model_path + 'train.log', 'a'))
if it % self.saving_every == 0:
save_path = self.saver.save(self.sess, self.checkpoint_path, global_step=it)
print("Model saved to: %s" % save_path)
print("Model saved to: %s" % save_path, file=open(self.model_path + 'train.log', 'a'))
save_path = self.saver.save(self.sess, self.checkpoint_path, global_step=it)
print("Model saved to: %s" % save_path)
print("Model saved to: %s" % save_path, file=open(self.model_path + 'train.log', 'a'))
# Closing savers
self.train_writer.close()
self.test_writer.close()
# Total time
time_usage = str(datetime.timedelta(seconds=int(round(time.time() - start_time))))
print("Total training time: " + time_usage)
print("Total training time: " + time_usage, file=open(self.model_path + 'train.log', 'a'))
if final_evaluation:
print("Evaluating final model...")
mean_dis_metric = self.evaluate_mean_disentanglement()
recon_loss_test = self.evaluate_test_recon_loss()
print("Mean Disentanglement Metric: " + str(mean_dis_metric),
file=open(self.model_path + 'train.log', 'a'))
print("Test Reconstruction Loss: " + str(recon_loss_test),
file=open(self.model_path + 'train.log', 'a'))
def load_latest_checkpoint(self, params_path):
self.saver.restore(self.sess, tf.train.latest_checkpoint(params_path))
def _data_init(self):
# Find dataset here: https://github.com/deepmind/dsprites-dataset
with np.load(self.data_file, encoding='bytes') as data:
all_imgs = data['imgs']
all_imgs = all_imgs[:, :, :, None] # make into 4d tensor
all_factors = data['latents_classes']
all_factors = all_factors[:, 1:] # Remove color factor
n_classes = np.array([3, 6, 40, 32, 32])
# 90% random test/train split
n_data = all_imgs.shape[0]
idx_random = np.random.permutation(n_data)
data_train = all_imgs[idx_random[0: (9 * n_data) // 10]]
data_test = all_imgs[idx_random[(9 * n_data) // 10:]]
return data_train, data_test, all_imgs, all_factors, n_classes
    def _iterator_init(self, batch_size=64):
        """
        Build tf.data input pipelines for the train/test splits.

        Each split is fed through a placeholder (avoids embedding the arrays
        in the graph) and gets its own initializable iterator; a feedable
        string handle selects the active split at run time.

        :param batch_size: mini-batch size for both splits
        :return: (iterator, handle, train_img_ph, iterator_train,
                  test_img_ph, iterator_test)
        """
        with tf.name_scope("iterators"):
            # Generate TF Dataset objects for each split
            train_img_ph = tf.placeholder(dtype=tf.float32, shape=self.data_train.shape)
            test_img_ph = tf.placeholder(dtype=tf.float32, shape=self.data_test.shape)
            dataset_train = tf.data.Dataset.from_tensor_slices(train_img_ph)
            dataset_test = tf.data.Dataset.from_tensor_slices(test_img_ph)
            # repeat forever; the training loop decides when to stop
            dataset_train = dataset_train.repeat()
            dataset_test = dataset_test.repeat()
            # Random batching
            dataset_train = dataset_train.shuffle(buffer_size=5000)
            dataset_train = dataset_train.batch(batch_size=batch_size)
            dataset_test = dataset_test.shuffle(buffer_size=1000)
            dataset_test = dataset_test.batch(batch_size=batch_size)
            # Prefetch
            dataset_train = dataset_train.prefetch(buffer_size=4)
            dataset_test = dataset_test.prefetch(buffer_size=4)
            # Iterator for each split
            iterator_train = dataset_train.make_initializable_iterator()
            iterator_test = dataset_test.make_initializable_iterator()
            # Global iterator: the string handle picks which split is read
            handle = tf.placeholder(tf.string, shape=[])
            iterator = tf.data.Iterator.from_string_handle(
                handle, dataset_train.output_types, dataset_train.output_shapes)
        return iterator, handle, train_img_ph, iterator_train, test_img_ph, iterator_test
    def _vae_init(self, inputs):
        """
        Wire the VAE graph: encoder -> reparameterization -> decoder.

        :param inputs: image batch tensor from the input pipeline
        :return: (input_vae, enc_mean, enc_logvar, z_sample, dec_logit,
                  dec_sigm, dec_mean_logit, dec_mean_sigm); the dec_mean_*
                  tensors decode the posterior mean without sampling.
        """
        with tf.name_scope("vae"):
            # Input
            input_vae = inputs
            # Encoder network
            enc_mean, enc_logvar = self._encoder_init(inputs=input_vae)
            with tf.name_scope("sampling"):
                # Reparameterisation trick: z = mean + std * eps, eps ~ N(0, I)
                with tf.name_scope("noise"):
                    noise = tf.random_normal(shape=tf.shape(enc_mean))
                with tf.name_scope("variance"):
                    # exp(logvar / 2) is the standard deviation
                    variance = tf.exp(enc_logvar / 2)
                with tf.name_scope("reparam_trick"):
                    z_sample = tf.add(enc_mean, (variance * noise))
            # Decoder network
            dec_logit, dec_sigm = self._decoder_init(inputs=z_sample)
            # Non-random decoder (same weights, reuse=True)
            dec_mean_logit, dec_mean_sigm = self._decoder_init(inputs=enc_mean, reuse=True)
        return input_vae, enc_mean, enc_logvar, z_sample, dec_logit, dec_sigm, dec_mean_logit, dec_mean_sigm
    def _encoder_init(self, inputs, reuse=False):
        """
        Convolutional encoder: four stride-2 conv layers (32, 32, 64, 64
        filters), a 128-unit FC layer, then two linear heads producing the
        posterior mean and log-variance.

        :param inputs: image batch tensor
        :param reuse: reuse the variables of an existing "encoder" scope
        :return: (enc_mean, enc_logvar), each of shape [batch, z_dim]
        """
        with tf.variable_scope("encoder"):
            e_1 = tf.layers.conv2d(inputs=inputs,
                                   filters=32,
                                   kernel_size=4,
                                   strides=2,
                                   activation=tf.nn.relu,
                                   padding="same",
                                   name="enc_conv_1",
                                   reuse=reuse)
            e_2 = tf.layers.conv2d(inputs=e_1,
                                   filters=32,
                                   kernel_size=4,
                                   strides=2,
                                   activation=tf.nn.relu,
                                   padding="same",
                                   name="enc_conv_2",
                                   reuse=reuse)
            e_3 = tf.layers.conv2d(inputs=e_2,
                                   filters=64,
                                   kernel_size=4,
                                   strides=2,
                                   activation=tf.nn.relu,
                                   padding="same",
                                   name="enc_conv_3",
                                   reuse=reuse)
            e_4 = tf.layers.conv2d(inputs=e_3,
                                   filters=64,
                                   kernel_size=4,
                                   strides=2,
                                   activation=tf.nn.relu,
                                   padding="same",
                                   name="enc_conv_4",
                                   reuse=reuse)
            with tf.name_scope("enc_flatten"):
                # flatten the final feature map to [batch, dim]
                dim = np.prod(e_4.get_shape().as_list()[1:])
                e_4_flat = tf.reshape(e_4, shape=(-1, dim))
            # NOTE(review): this FC layer has activation=None (linear) —
            # confirm a non-linearity was not intended here.
            e_5 = tf.layers.dense(inputs=e_4_flat,
                                  units=128,
                                  activation=None,
                                  name="enc_fc_1",
                                  reuse=reuse)
            # linear heads for the Gaussian posterior parameters
            enc_mean = tf.layers.dense(inputs=e_5,
                                       units=self.z_dim,
                                       activation=None,
                                       name="enc_fc_2_mean",
                                       reuse=reuse)
            enc_logvar = tf.layers.dense(inputs=e_5,
                                         units=self.z_dim,
                                         activation=None,
                                         name="enc_fc_2_logvar",
                                         reuse=reuse)
        return enc_mean, enc_logvar
    def _decoder_init(self, inputs, reuse=False):
        """
        Deconvolutional decoder: two FC layers, reshape to 4x4x64, then four
        stride-2 transposed convolutions back up to a 64x64x1 logit image.

        :param inputs: latent code batch, shape [batch, z_dim]
        :param reuse: reuse the variables of an existing "decoder" scope
        :return: (dec_logit, dec_sigm) — pre-sigmoid logits and sigmoid output
        """
        with tf.variable_scope("decoder"):
            d_1 = tf.layers.dense(inputs=inputs,
                                  units=128,
                                  activation=tf.nn.relu,
                                  name="dec_fc_1",
                                  reuse=reuse)
            d_2 = tf.layers.dense(inputs=d_1,
                                  units=4*4*64,
                                  activation=tf.nn.relu,
                                  name="dec_fc_2",
                                  reuse=reuse)
            with tf.name_scope("dec_reshape"):
                # back to a spatial feature map for the upconvolutions
                d_2_reshape = tf.reshape(d_2, shape=[-1, 4, 4, 64])
            d_3 = tf.layers.conv2d_transpose(inputs=d_2_reshape,
                                             filters=64,
                                             kernel_size=4,
                                             strides=2,
                                             activation=tf.nn.relu,
                                             padding="same",
                                             name="dec_upconv_1",
                                             reuse=reuse)
            d_4 = tf.layers.conv2d_transpose(inputs=d_3,
                                             filters=32,
                                             kernel_size=4,
                                             strides=2,
                                             activation=tf.nn.relu,
                                             padding="same",
                                             name="dec_upconv_2",
                                             reuse=reuse)
            d_5 = tf.layers.conv2d_transpose(inputs=d_4,
                                             filters=32,
                                             kernel_size=4,
                                             strides=2,
                                             activation=tf.nn.relu,
                                             padding="same",
                                             name="dec_upconv_3",
                                             reuse=reuse)
            # final layer is linear: losses consume the raw logits
            dec_logit = tf.layers.conv2d_transpose(inputs=d_5,
                                                   filters=1,
                                                   kernel_size=4,
                                                   strides=2,
                                                   activation=None,
                                                   padding="same",
                                                   name="dec_upconv_4",
                                                   reuse=reuse)
            dec_sigm = tf.sigmoid(dec_logit, name="dec_sigmoid_out")
        return dec_logit, dec_sigm
def _loss_init(self):
with tf.name_scope("loss"):
with tf.name_scope("reconstruction_loss"):
# Reconstruction loss is bernoulli in each pixel
im_flat = tf.reshape(self.input_vae, shape=[-1, 64 * 64 * 1])
logits_flat = tf.reshape(self.dec_logit, shape=[-1, 64 * 64 * 1])
by_pixel_recon = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_flat, labels=im_flat)
by_example_recon = tf.reduce_sum(by_pixel_recon, axis=1)
recon_loss = tf.reduce_mean(by_example_recon, name="recon_loss")
with tf.name_scope("kl_loss"):
# KL against N(0,1) is 0.5 * sum_j ( var_j - logvar_j + mean^2_j - 1 )
with tf.name_scope("variance"):
variance = tf.exp(self.enc_logvar)
with tf.name_scope("squared_mean"):
squared_mean = self.enc_mean ** 2
with tf.name_scope("kl_divergence"):
sum_argument = variance - self.enc_logvar + squared_mean
by_example_kl = 0.5 * tf.reduce_sum(sum_argument, axis=1) - self.z_dim
kl_divergence = tf.reduce_mean(by_example_kl, name="kl_divergence")
with tf.name_scope("total_vae_loss"):
vae_loss = recon_loss + kl_divergence
return vae_loss, recon_loss
def _optimizer_init(self):
with tf.name_scope("optimizer"):
with tf.name_scope("vae_optimizer"):
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
vae_vars = enc_vars + dec_vars
vae_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4,
beta1=0.9,
beta2=0.999)
vae_train_step = vae_optimizer.minimize(self.vae_loss, var_list=vae_vars)
return vae_train_step
def evaluate_mean_disentanglement(self):
n_tries = 5
dis_metric = 0
print("Evaluating disentanglement with "+str(n_tries)+" tries.")
for i in range(n_tries):
this_disen_metric = self.evaluate_disentanglement()
print(str(i+1)+"/"+str(n_tries)+" Disentanglement Metric: "+str(this_disen_metric))
dis_metric = dis_metric + this_disen_metric
dis_metric = dis_metric / n_tries
print("Mean Disentanglement Metric: "+str(dis_metric))
return dis_metric
def evaluate_test_recon_loss(self):
print("Evaluating reconstruction loss in test set.")
recon_loss = 0
batch_size = 64
n_data_test = self.data_test.shape[0]
n_batches = int(n_data_test/batch_size)
print("Total batches:", n_batches)
for i in range(n_batches):
start_img = i*batch_size
end_img = (i+1)*batch_size
batch_imgs = self.data_test[start_img:end_img, :, :, :]
# Reconstruction without random sampling
dec_mean_logit = self.sess.run(self.dec_mean_logit, feed_dict={self.input_vae: batch_imgs})
# Error according to non-random reconstruction
this_recon_loss = self.sess.run(self.recon_loss,
feed_dict={self.dec_logit: dec_mean_logit, self.input_vae: batch_imgs})
recon_loss = recon_loss + this_recon_loss
if (i + 1) % 100 == 0:
print(str(i+1)+"/"+str(n_batches)+" evaluated.")
recon_loss = recon_loss / n_batches
print("Reconstruction loss: "+str(recon_loss))
return recon_loss
def compute_mean_kl_dim_wise(self, batch_mu, batch_logvar):
# Shape of batch_mu is [batch, z_dim], same for batch_logvar
# KL against N(0,1) is 0.5 * ( var_j - logvar_j + mean^2_j - 1 )
variance = np.exp(batch_logvar)
squared_mean = np.square(batch_mu)
batch_kl = 0.5 * (variance - batch_logvar + squared_mean - 1)
mean_kl = np.mean(batch_kl, axis=0)
return mean_kl
def evaluate_disentanglement(self, verbose=False):
    """FactorVAE-style disentanglement metric via a majority-vote classifier.

    For each ground-truth factor k, batches are sampled with factor k held
    fixed; each batch votes for the latent dimension with the lowest
    normalised variance. The metric is the accuracy of the majority-vote
    classifier mapping winning dimension -> factor. Returns a float in [0, 1].

    Relies on instance state: self.n_classes (per-factor cardinalities),
    self.all_factors / self.all_imgs (full labelled dataset), and the
    encoder tensors self.enc_mean / self.enc_logvar fed via self.input_vae.
    """
    n_examples_per_vote = 100 # Generated examples when we fix a factor (L in paper)
    n_votes = 800 # Total number of training pairs for the classifier
    n_factors = self.n_classes.shape[0]
    n_votes_per_factor = int(n_votes / n_factors)

    # First, we get all the necessary codes at once
    all_mus = []
    all_logvars = []
    code_list = []
    # Fix a factor k
    for k_fixed in range(n_factors):
        code_list_per_factor = []
        # Generate training examples for this factor
        for _ in range(n_votes_per_factor):
            # Fix a value for this factor
            fixed_value = np.random.choice(self.n_classes[k_fixed])
            # Generate data with this factor fixed but all other factors varying randomly. Sample L examples.
            useful_examples_idx = np.where(self.all_factors[:, k_fixed] == fixed_value)[0]
            sampled_examples_idx = np.random.choice(useful_examples_idx, n_examples_per_vote)
            sampled_imgs = self.all_imgs[sampled_examples_idx, :, :, :]
            # Obtain their representations with the encoder
            feed_dict = {self.input_vae: sampled_imgs}
            code_mu, code_logvar = self.sess.run([self.enc_mean, self.enc_logvar], feed_dict=feed_dict)
            all_mus.append(code_mu)
            all_logvars.append(code_logvar)
            code_list_per_factor.append((code_mu, code_logvar))
        code_list.append(code_list_per_factor)
    # Concatenate every code
    all_mus = np.concatenate(all_mus, axis=0)
    all_logvars = np.concatenate(all_logvars, axis=0)

    # Now, lets compute the KL divergence of each dimension wrt the prior
    emp_mean_kl = self.compute_mean_kl_dim_wise(all_mus, all_logvars)

    # Throw the dimensions that collapsed to the prior (KL ~ 0 means the
    # dimension carries no information and would add noise to the vote)
    kl_tol = 1e-2
    useful_dims = np.where(emp_mean_kl > kl_tol)[0]

    # Compute scales for useful dims
    scales = np.std(all_mus[:, useful_dims], axis=0)

    if verbose:
        print("Empirical mean for kl dimension-wise:")
        print(np.reshape(emp_mean_kl, newshape=(-1, 1)))
        print("Useful dimensions:", useful_dims, " - Total:", useful_dims.shape[0])
        print("Empirical Scales:", scales)

    # Dataset for classifier:
    d_values = []
    k_values = []
    # Fix a factor k
    for k_fixed in range(n_factors):
        # Generate training examples for this factor
        for i in range(n_votes_per_factor):
            # Get previously generated codes (mu only, index [0] of the tuple)
            codes = code_list[k_fixed][i][0]
            # Keep only useful dims
            codes = codes[:, useful_dims]
            # Normalise each dimension by its empirical standard deviation over the full data
            # (or a large enough random subset)
            norm_codes = codes / scales # dimension (L, z_dim)
            # Take the empirical variance in each dimension of these normalised representations
            emp_variance = np.var(norm_codes, axis=0) # dimension (z_dim,), variance for each dimension of code
            # Then the index of the dimension with the lowest variance...
            d_min_var = np.argmin(emp_variance)
            # ...and the target index k provide one training input/output example for the classifier majority vote
            d_values.append(d_min_var)
            k_values.append(k_fixed)
    d_values = np.array(d_values)
    k_values = np.array(k_values)

    # Since both inputs and outputs lie in a discrete space, the optimal classifier is the majority-vote classifier
    # and the metric is the error rate of the classifier (actually they show the accuracy in the paper lol)
    # v_matrix[j, k] = number of votes in which dimension j won for factor k
    v_matrix = np.zeros((useful_dims.shape[0], n_factors))
    for j in range(useful_dims.shape[0]):
        for k in range(n_factors):
            v_matrix[j, k] = np.sum((d_values == j) & (k_values == k))
    if verbose:
        print("Votes:")
        print(v_matrix)

    # Majority vote is C_j = argmax_k V_jk
    classifier = np.argmax(v_matrix, axis=1)
    predicted_k = classifier[d_values]
    accuracy = np.sum(predicted_k == k_values) / n_votes
    return accuracy
def get_traversals(self, example_index, show_figure=False):
# Return a list of arrays (n_travers, 64, 64), one per dimension.
# Dimensions are sorted in descending order of KL divergence
feed_dict = {self.input_vae: self.all_imgs[[example_index], :, :, :]}
z_base, logvar_base = self.sess.run([self.enc_mean, self.enc_logvar], feed_dict=feed_dict)
# Sort by KL (in descending order)
mean_kl = self.compute_mean_kl_dim_wise(z_base, logvar_base)
sorted_dims = np.argsort(-mean_kl)
trav_values = | np.arange(-2, 2.1, 0.5) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019

@author: raryapratama

Dynamic LCA of a primary-forest (PF) to secondary-forest (SF) land-use
conversion with wood harvesting, comparing scenarios S1 and E.
"""

#%%

#Step (1): Import Python libraries, set land conversion scenarios general parameters

import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd

#PF_SF Scenario

##Set parameters
#Parameters for primary forest
initAGB = 233 #initial aboveground biomass (tC/ha); source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72

#parameters for secondary forest regrowth. Source: Busch et al. (2019)
coeff_MF_nonpl = 11.47 #moist forest, non-plantation
coeff_DF_nonpl = 11.24 #dry forest, non-plantation
coeff_GL_nonpl = 9.42  #grassland, non-plantation
coeff_MF_pl =17.2      #moist forest, plantation

tf = 201 #simulation horizon (years)

#a, b: parameters of the AGB decomposition curve used in decomp_* below
a = 0.082
b = 2.53

#%%

#Step (2_1): C loss from the harvesting/clear cut

df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
df3 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

t = range(0,tf,1)

#annual carbon released by burning firewood / other energy use (per scenario)
c_firewood_energy_S1 = df1['Firewood_other_energy_use'].values
c_firewood_energy_E = df3['Firewood_other_energy_use'].values

#print(c_loss_S1)
#print(c_loss_E)

#%%

#Step (2_2): C loss from the harvesting/clear cut as wood pellets

dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

#NOTE(review): dfE is loaded above but df3 is indexed here; both read the same
#'PF_SF_E' sheet so values match, but dfE was probably intended — confirm.
c_pellets_E = df3['Wood_pellets'].values
#%%

#Step (3): Aboveground biomass (AGB) decomposition

#S1
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')

tf = 201

t = np.arange(tf)

def decomp_S1(t,remainAGB_S1):
    """Carbon still undecomposed after `t` years out of cohort `remainAGB_S1`
    (decay curve with global parameters a, b)."""
    return (1-(1-np.exp(-a*t))**b)*remainAGB_S1

#set zero matrix; column i will hold the decay trajectory of the biomass
#cohort left on site in year i
output_decomp_S1 = np.zeros((len(t),len(df['C_remainAGB'].values)))

for i,remain_part_S1 in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp_S1[i:,i] = decomp_S1(t[:len(t)-i],remain_part_S1)

print(output_decomp_S1[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements

#difference between element,
#NOTE(review): the -1 sits inside `.values-1` (elementwise subtraction), so
#len() is unchanged and the matrix gets tf columns — which the while-loop
#below requires. `len(...)-1` was probably the intent; verify before changing.
subs_matrix_S1 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))

i = 0
while i < tf:
    subs_matrix_S1[:,i] = np.diff(output_decomp_S1[:,i])
    i = i + 1

print(subs_matrix_S1[:,:4])
print(len(subs_matrix_S1))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1 = subs_matrix_S1.clip(max=0)

print(subs_matrix_S1[:,:4])

#make the results as absolute values
subs_matrix_S1 = abs(subs_matrix_S1)
print(subs_matrix_S1[:,:4])

#insert row of zeros into the first row of the subs_matrix (len(t)-200 == 1 row)
zero_matrix_S1 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1)

subs_matrix_S1 = np.vstack((zero_matrix_S1, subs_matrix_S1))

print(subs_matrix_S1[:,:4])

#sum every column of the subs_matrix into one vector matrix:
#total decomposition emissions per year across all cohorts
matrix_tot_S1 = (tf,1)
decomp_tot_S1 = np.zeros(matrix_tot_S1)

i = 0
while i < tf:
    decomp_tot_S1[:,0] = decomp_tot_S1[:,0] + subs_matrix_S1[:,i]
    i = i + 1

print(decomp_tot_S1[:,0])
#E — same cohort-decomposition pipeline as S1, for scenario E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

tf = 201

t = np.arange(tf)

def decomp_E_trial(t,remainAGB_E):
    """Carbon still undecomposed after `t` years out of cohort `remainAGB_E`."""
    return (1-(1-np.exp(-a*t))**b)*remainAGB_E

#set zero matrix; column i holds the decay trajectory of the year-i cohort
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))

for i,remain_part_E in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp_E[i:,i] = decomp_E_trial(t[:len(t)-i],remain_part_E)

print(output_decomp_E[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements

#difference between element,
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))

i = 0
while i < tf:
    subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
    i = i + 1

print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)

print(subs_matrix_E[:,:4])

#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_trial = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_trial)

subs_matrix_E = np.vstack((zero_matrix_E_trial, subs_matrix_E))

print(subs_matrix_E[:,:4])

#sum every column of the subs_matrix into one vector matrix:
#total decomposition emissions per year across all cohorts
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)

i = 0
while i < tf:
    decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
    i = i + 1

print(decomp_tot_E[:,0])

#plotting the yearly decomposition emissions of both scenarios
t = np.arange(0,tf)

plt.plot(t,decomp_tot_S1,label='S1')
plt.plot(t,decomp_tot_E,label='E')
plt.xlim(0,200)

plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)

plt.show()

type(decomp_tot_E[:,0])
#%%

#Step (4): Dynamic stock model of in-use wood materials

#HWP (harvested wood products) from primary forest, 35 year-old building
#materials lifetime; outflow TestDSM*.o feeds the emission totals in Step (8)

from dynamic_stock_model import DynamicStockModel

df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

#product lifetime
#building materials
B = 35

#normally distributed lifetime, mean B, std 0.3*B
TestDSM1 = DynamicStockModel(t = df1['Year'].values, i = df1['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})

CheckStr1, ExitFlag1 = TestDSM1.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()

Stock_by_cohort1, ExitFlag1 = TestDSM1.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()

S1, ExitFlag1 = TestDSM1.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()

O_C1, ExitFlag1 = TestDSM1.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()

O1, ExitFlag1 = TestDSM1.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()

DS1, ExitFlag1 = TestDSM1.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()

Bal1, ExitFlag1 = TestDSM1.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()

#print output flow (annual outflow of wood products leaving use)
print(TestDSM1.o)
print(TestDSME.o)
#%%

#Step (5): Biomass growth of the regrowing secondary forest

t = range(0,tf,1)

#calculate the biomass and carbon content of moist forest
#(44/12 converts tC to tCO2; *1000 converts t to kg)
def Cgrowth_1(t):
    """Cumulative regrowth carbon stock of moist, non-plantation forest."""
    return (44/12*1000*coeff_MF_nonpl*(np.sqrt(t)))

flat_list_moist = Cgrowth_1(t)

#calculate the biomass and carbon content of dry forest
def Cgrowth_2(t):
    """Cumulative regrowth carbon stock of dry, non-plantation forest."""
    return (44/12*1000*coeff_DF_nonpl*(np.sqrt(t)))

flat_list_dry = Cgrowth_2(t)

#plotting
plt.plot (t,flat_list_moist, label = 'Moist Forest, non-plantation')
plt.plot (t,flat_list_dry, label = 'Dry forest, non-plantation')

plt.xlim([0, 200])

plt.xlabel('Year')
plt.ylabel('Carbon stock (tC/ha)')
plt.title('')

plt.legend(loc='upper left')

plt.savefig('C:\Work\Programming\C_removal_fig.png', dpi=300)

plt.show()

###Yearly Sequestration

###Moist Forest
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_moist'(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_moist = [p - q for q, p in zip(flat_list_moist, flat_list_moist[1:])]

#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_moist.insert(0,var)

#make 'flat_list_moist' elements negative numbers to denote sequestration
flat_list_moist = [ -x for x in flat_list_moist]

print(flat_list_moist)

#Dry forest
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_dry'(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
#(the comprehension variable `t` does not leak in Python 3, so the global `t` is untouched)
flat_list_dry = [t - u for u, t in zip(flat_list_dry, flat_list_dry[1:])]

#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_dry.insert(0,var)

#make 'flat_list_dry' elements negative numbers to denote sequestration
flat_list_dry = [ -x for x in flat_list_dry]

print(flat_list_dry)
#%%

#Step(6): post-harvest processing of wood

#post-harvest wood processing: annual processing emissions per scenario
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
df3 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

t = range(0,tf,1)

PH_Emissions_HWP1_S1 = df1['PH_Emissions_HWP'].values
PH_Emissions_HWP1_E = df3['PH_Emissions_HWP'].values
#%%

#Step (7_1): landfill gas decomposition (CH4)

#CH4 decomposition
hl = 20 #half-life of landfilled wood carbon (years)
k = (np.log(2))/hl


#S1
df1_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')

tf = 201

t = np.arange(tf)

def decomp_CH4_S1(t,Landfill_decomp_CH4_S1):
    """Share of cohort `Landfill_decomp_CH4_S1` not yet released after t years.

    The expression simplifies to exp(-k*t) * cohort (first-order decay).
    """
    return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_S1

#set zero matrix; column i holds the decay trajectory of the year-i cohort
output_decomp_CH4_S1 = np.zeros((len(t),len(df1_CH4['Landfill_decomp_CH4'].values)))

for i,remain_part_CH4_S1 in enumerate(df1_CH4['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S1[i:,i] = decomp_CH4_S1(t[:len(t)-i],remain_part_CH4_S1)

print(output_decomp_CH4_S1[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements

#difference between element,
subs_matrix_CH4_S1 = np.zeros((len(t)-1,len(df1_CH4['Landfill_decomp_CH4'].values-1)))

i = 0
while i < tf:
    subs_matrix_CH4_S1[:,i] = np.diff(output_decomp_CH4_S1[:,i])
    i = i + 1

print(subs_matrix_CH4_S1[:,:4])
print(len(subs_matrix_CH4_S1))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1 = subs_matrix_CH4_S1.clip(max=0)

print(subs_matrix_CH4_S1[:,:4])

#make the results as absolute values
subs_matrix_CH4_S1 = abs(subs_matrix_CH4_S1)
print(subs_matrix_CH4_S1[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1 = np.zeros((len(t)-200,len(df1_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1)

subs_matrix_CH4_S1 = np.vstack((zero_matrix_CH4_S1, subs_matrix_CH4_S1))

print(subs_matrix_CH4_S1[:,:4])

#sum every column of the subs_matrix into one vector matrix:
#total CH4 release per year across all landfilled cohorts
matrix_tot_CH4_S1 = (tf,1)
decomp_tot_CH4_S1 = np.zeros(matrix_tot_CH4_S1)

i = 0
while i < tf:
    decomp_tot_CH4_S1[:,0] = decomp_tot_CH4_S1[:,0] + subs_matrix_CH4_S1[:,i]
    i = i + 1

print(decomp_tot_CH4_S1[:,0])
#E — same landfill-CH4 pipeline as S1, for scenario E
dfE_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

tf = 201

t = np.arange(tf)

def decomp_CH4_E(t,Landfill_decomp_CH4_E):
    """Share of cohort not yet released after t years (== exp(-k*t) * cohort)."""
    return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_E

#set zero matrix; column i holds the decay trajectory of the year-i cohort
output_decomp_CH4_E = np.zeros((len(t),len(dfE_CH4['Landfill_decomp_CH4'].values)))

for i,remain_part_CH4_E in enumerate(dfE_CH4['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_E[i:,i] = decomp_CH4_E(t[:len(t)-i],remain_part_CH4_E)

print(output_decomp_CH4_E[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements

#difference between element,
subs_matrix_CH4_E = np.zeros((len(t)-1,len(dfE_CH4['Landfill_decomp_CH4'].values-1)))

i = 0
while i < tf:
    subs_matrix_CH4_E[:,i] = np.diff(output_decomp_CH4_E[:,i])
    i = i + 1

print(subs_matrix_CH4_E[:,:4])
print(len(subs_matrix_CH4_E))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_E = subs_matrix_CH4_E.clip(max=0)

print(subs_matrix_CH4_E[:,:4])

#make the results as absolute values
subs_matrix_CH4_E = abs(subs_matrix_CH4_E)
print(subs_matrix_CH4_E[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_E = np.zeros((len(t)-200,len(dfE_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E)

subs_matrix_CH4_E = np.vstack((zero_matrix_CH4_E, subs_matrix_CH4_E))

print(subs_matrix_CH4_E[:,:4])

#sum every column of the subs_matrix into one vector matrix:
#total CH4 release per year across all landfilled cohorts
matrix_tot_CH4_E = (tf,1)
decomp_tot_CH4_E = np.zeros(matrix_tot_CH4_E)

i = 0
while i < tf:
    decomp_tot_CH4_E[:,0] = decomp_tot_CH4_E[:,0] + subs_matrix_CH4_E[:,i]
    i = i + 1

print(decomp_tot_CH4_E[:,0])

#plotting the yearly landfill CH4 emissions of both scenarios
t = np.arange(0,tf)

plt.plot(t,decomp_tot_CH4_S1,label='S1')
plt.plot(t,decomp_tot_CH4_E,label='E')
plt.xlim(0,200)

plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)

plt.show()

type(decomp_tot_CH4_S1[:,0])
#%%

#Step (7_2): landfill gas decomposition (CO2)

#CO2 decomposition
hl = 20 #half-life of landfilled wood carbon (years), same as the CH4 pathway
k = (np.log(2))/hl

#S1
df1_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')

tf = 201

t = np.arange(tf)

def decomp_CO2_S1(t,Landfill_decomp_CO2_S1):
    """Share of cohort not yet released after t years (== exp(-k*t) * cohort)."""
    return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_S1

#set zero matrix; column i holds the decay trajectory of the year-i cohort
output_decomp_CO2_S1 = np.zeros((len(t),len(df1_CO2['Landfill_decomp_CO2'].values)))

for i,remain_part_CO2_S1 in enumerate(df1_CO2['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_CO2_S1[i:,i] = decomp_CO2_S1(t[:len(t)-i],remain_part_CO2_S1)

print(output_decomp_CO2_S1[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements

#difference between element,
subs_matrix_CO2_S1 = np.zeros((len(t)-1,len(df1_CO2['Landfill_decomp_CO2'].values-1)))

i = 0
while i < tf:
    subs_matrix_CO2_S1[:,i] = np.diff(output_decomp_CO2_S1[:,i])
    i = i + 1

print(subs_matrix_CO2_S1[:,:4])
print(len(subs_matrix_CO2_S1))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1 = subs_matrix_CO2_S1.clip(max=0)

print(subs_matrix_CO2_S1[:,:4])

#make the results as absolute values
subs_matrix_CO2_S1 = abs(subs_matrix_CO2_S1)
print(subs_matrix_CO2_S1[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1 = np.zeros((len(t)-200,len(df1_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1)

subs_matrix_CO2_S1 = np.vstack((zero_matrix_CO2_S1, subs_matrix_CO2_S1))

print(subs_matrix_CO2_S1[:,:4])

#sum every column of the subs_matrix into one vector matrix:
#total landfill CO2 release per year across all cohorts
matrix_tot_CO2_S1 = (tf,1)
decomp_tot_CO2_S1 = np.zeros(matrix_tot_CO2_S1)

i = 0
while i < tf:
    decomp_tot_CO2_S1[:,0] = decomp_tot_CO2_S1[:,0] + subs_matrix_CO2_S1[:,i]
    i = i + 1

print(decomp_tot_CO2_S1[:,0])
#E — same landfill-CO2 pipeline as S1, for scenario E
dfE_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')

tf = 201

t = np.arange(tf)

def decomp_CO2_E(t,Landfill_decomp_CO2_E):
    """Share of cohort not yet released after t years (== exp(-k*t) * cohort)."""
    return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_E

#set zero matrix; column i holds the decay trajectory of the year-i cohort
output_decomp_CO2_E = np.zeros((len(t),len(dfE_CO2['Landfill_decomp_CO2'].values)))

for i,remain_part_CO2_E in enumerate(dfE_CO2['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_CO2_E[i:,i] = decomp_CO2_E(t[:len(t)-i],remain_part_CO2_E)

print(output_decomp_CO2_E[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements

#difference between element,
subs_matrix_CO2_E = np.zeros((len(t)-1,len(dfE_CO2['Landfill_decomp_CO2'].values-1)))

i = 0
while i < tf:
    subs_matrix_CO2_E[:,i] = np.diff(output_decomp_CO2_E[:,i])
    i = i + 1

print(subs_matrix_CO2_E[:,:4])
print(len(subs_matrix_CO2_E))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_E = subs_matrix_CO2_E.clip(max=0)

print(subs_matrix_CO2_E[:,:4])

#make the results as absolute values
subs_matrix_CO2_E = abs(subs_matrix_CO2_E)
print(subs_matrix_CO2_E[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_E = np.zeros((len(t)-200,len(dfE_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_E)

subs_matrix_CO2_E = np.vstack((zero_matrix_CO2_E, subs_matrix_CO2_E))

print(subs_matrix_CO2_E[:,:4])

#sum every column of the subs_matrix into one vector matrix:
#total landfill CO2 release per year across all cohorts
matrix_tot_CO2_E = (tf,1)
decomp_tot_CO2_E = np.zeros(matrix_tot_CO2_E)

i = 0
while i < tf:
    decomp_tot_CO2_E[:,0] = decomp_tot_CO2_E[:,0] + subs_matrix_CO2_E[:,i]
    i = i + 1

print(decomp_tot_CO2_E[:,0])

#plotting the yearly landfill CO2 emissions of both scenarios
t = np.arange(0,tf)

plt.plot(t,decomp_tot_CO2_S1,label='S1')
plt.plot(t,decomp_tot_CO2_E,label='E')
plt.xlim(0,200)

plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)

plt.show()

type(decomp_tot_CO2_S1[:,0])
#%%

#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated

#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two

#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
#Scenario E additionally includes wood-pellet combustion (c_pellets_E); S1 has none.
Emissions_PF_SF_S1 = [c_firewood_energy_S1, decomp_tot_S1[:,0], TestDSM1.o, PH_Emissions_HWP1_S1, decomp_tot_CO2_S1[:,0]]
Emissions_PF_SF_E = [c_firewood_energy_E, c_pellets_E, decomp_tot_E[:,0], TestDSME.o, PH_Emissions_HWP1_E, decomp_tot_CO2_E[:,0]]

#element-wise sum of the component series -> one total annual CO2 series per scenario
Emissions_PF_SF_S1 = [sum(x) for x in zip(*Emissions_PF_SF_S1)]
Emissions_PF_SF_E = [sum(x) for x in zip(*Emissions_PF_SF_E)]

#CH4_S1 (kept separate because CH4 has its own characterization factor)
Emissions_CH4_PF_SF_S1 = decomp_tot_CH4_S1[:,0]

#CH4_E
Emissions_CH4_PF_SF_E = decomp_tot_CH4_E[:,0]
#%%

#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation

#year column: 0 .. tf-1
year = list(range(0, tf))
print (year)

#CH4 emission column placeholder: all zeros (the per-scenario CH4 series are
#written via Col3_* below)
import itertools
Emissions_CH4 = [0] * tf
print(Emissions_CH4)

#emission reference column: a unit pulse at year 0 (for dynGWP normalisation)
Emission_ref = [0] * tf
print(Emission_ref)

#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)

Col1 = year
Col2_1 = Emissions_PF_SF_S1
Col2_E = Emissions_PF_SF_E
Col3_1 = Emissions_CH4_PF_SF_S1
Col3_E = Emissions_CH4_PF_SF_E
Col4 = flat_list_moist
Col5 = Emission_ref
Col6 = flat_list_dry

#S1
df1_moi = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_1,'kg_CH4':Col3_1,'kg_CO2_seq':Col4,'emission_ref':Col5})
df1_dry = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_1,'kg_CH4':Col3_1,'kg_CO2_seq':Col6,'emission_ref':Col5})

#E
dfE_moi = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E,'kg_CH4':Col3_E,'kg_CO2_seq':Col4,'emission_ref':Col5})
dfE_dry = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E,'kg_CH4':Col3_E,'kg_CO2_seq':Col6,'emission_ref':Col5})

#Write one sheet per scenario/forest-type combination. The context manager
#finalises the workbook on exit; the previous writer.save()-then-close() pair
#double-finalised on pandas 1.x and crashes on pandas >= 2.0, where
#ExcelWriter.save() was removed.
with pd.ExcelWriter('emissions_seq_PF_SF_EC.xlsx', engine = 'xlsxwriter') as writer:
    df1_moi.to_excel(writer, sheet_name = 'S1_moist', header=True, index=False )
    df1_dry.to_excel(writer, sheet_name = 'S1_dry', header=True, index=False)
    dfE_moi.to_excel(writer, sheet_name = 'E_moist', header=True, index=False)
    dfE_dry.to_excel(writer, sheet_name = 'E_dry', header=True, index=False)
#%%

## DYNAMIC LCA, for wood-based scenarios

# Step (10): Set General Parameters for Dynamic LCA calculation

# General Parameters

aCH4 = 0.129957e-12;    # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12;    # methane - lifetime (years)
aCO2 = 0.0018088e-12;    # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186];    # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186];    # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217;    # CO2 parameters according to Bern carbon cycle-climate model
tf = 202   #until 202 because we want to get the DCF(t-i) until DCF(201) to determine the impact from the emission from the year 200 (There is no DCF(0))

#%%

#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)

t = range(0,tf,1)

## CO2 calculation formula
# time dependant atmospheric load for CO2, Bern model
def C_CO2(t):
    """Fraction of a CO2 pulse remaining in the atmosphere after t years
    (sum-of-exponentials impulse response of the Bern model)."""
    return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])

output_CO2 = np.array([C_CO2(ti) for ti in t])

print(output_CO2)

## CH4 calculation formula
# time dependant atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
    """Fraction of a CH4 pulse remaining after t years (single exponential)."""
    return np.exp(-t/TauCH4)

output_CH4 = np.array([C_CH4(ti) for ti in t])

plt.xlim([0, 200])
plt.ylim([0,1.1])

plt.plot(t, output_CO2, output_CH4)

plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()

output_CH4.size
#%%

#determine the C(t) for CO2: integrate the atmospheric load over each
#one-year interval [i-1, i]
#NOTE(review): at i=0 this integrates over [-1, 0], i.e. before the pulse —
#confirm this boundary convention is intended.
s = []

t = np.arange(0,tf,1)

for i in t:
    s.append(quad(C_CO2,i-1,i))

res_list_CO2 = [x[0] for x in s]

len(res_list_CO2)

#%%

#determine the C(t) for CH4 (same yearly integration)
s = []

for i in t:
    s.append(quad(C_CH4,i-1,i))

res_list_CH4 = [p[0] for p in s]

#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])

plt.plot(t, res_list_CO2, res_list_CH4)

plt.show()

#%%

#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
#DCF = radiative efficiency x yearly-integrated atmospheric load

DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)

DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)

plt.xlim([0, 200])
plt.ylim([0,4e-15])

plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()

len(DCF_inst_CO2)
#%%

#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))

#wood-based
#read S1_moist
df = pd.read_excel('emissions_seq_PF_SF_EC.xlsx', 'S1_moist') # can also index sheet by name or fetch all sheets
emission_CO2_S1moi = df['kg_CO2'].tolist()
emission_CH4_S1moi = df['kg_CH4'].tolist()
emission_CO2_seq_S1moi = df['kg_CO2_seq'].tolist()

emission_CO2_ref = df['emission_ref'].tolist()

#read S1_dry
df = pd.read_excel('emissions_seq_PF_SF_EC.xlsx', 'S1_dry')
emission_CO2_S1dry = df['kg_CO2'].tolist()
emission_CH4_S1dry = df['kg_CH4'].tolist()
emission_CO2_seq_S1dry = df['kg_CO2_seq'].tolist()

#read E_moist
df = pd.read_excel('emissions_seq_PF_SF_EC.xlsx', 'E_moist') # can also index sheet by name or fetch all sheets
emission_CO2_Emoi = df['kg_CO2'].tolist()
emission_CH4_Emoi = df['kg_CH4'].tolist()
emission_CO2_seq_Emoi = df['kg_CO2_seq'].tolist()

#read E_dry
df = pd.read_excel('emissions_seq_PF_SF_EC.xlsx', 'E_dry')
emission_CO2_Edry = df['kg_CO2'].tolist()
emission_CH4_Edry = df['kg_CH4'].tolist()
emission_CO2_seq_Edry = df['kg_CO2_seq'].tolist()

#%%

#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)

#read S1
df = pd.read_excel('PF_SF_EC.xlsx', 'NonRW_PF_SF_S1') # can also index sheet by name or fetch all sheets
emission_NonRW_PF_SF_S1 = df['NonRW_emissions'].tolist()
emission_NonRW_PF_SF_S1_seq = df['kg_CO2_seq'].tolist()

#NOTE(review): emission_CO2_ref is re-read here, overwriting the Step (13)
#value — harmless if both sheets carry the same reference pulse; verify.
emission_CO2_ref = df['emission_ref'].tolist()

#read E
df = pd.read_excel('PF_SF_EC.xlsx', 'NonRW_PF_SF_E') # can also index sheet by name or fetch all sheets
emission_NonRW_PF_SF_E = df['NonRW_emissions'].tolist()
emission_NonRW_PF_SF_E_seq = df['kg_CO2_seq'].tolist()
#%%

#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4

#DCF(t-i) CO2
#Upper-triangular matrix: entry [i, t] is the DCF felt at year t from an
#emission in year i (zero for i > t).
matrix = (tf-1,tf-1)
DCF_CO2_ti = np.zeros(matrix)

for t in range(0,tf-1):
    i = -1
    while i < t:
        DCF_CO2_ti[i+1,t] = DCF_inst_CO2[t-i]
        i = i + 1

print(DCF_CO2_ti)

#sns.heatmap(DCF_CO2_ti)

DCF_CO2_ti.shape

#DCF(t-i) CH4 — same construction with the CH4 factors
matrix = (tf-1,tf-1)
DCF_CH4_ti = np.zeros(matrix)

for t in range(0,tf-1):
    i = -1
    while i < t:
        DCF_CH4_ti[i+1,t] = DCF_inst_CH4[t-i]
        i = i + 1

print(DCF_CH4_ti)

#sns.heatmap(DCF_CH4_ti)

DCF_CH4_ti.shape
#%%

# Step (16): Calculate instantaneous global warming impact (GWI)

##wood-based
#S1_moist
t = np.arange(0,tf-1,1)

#columns: [CO2 emissions, CH4 emissions, CO2 sequestration]
matrix_GWI_S1moi = (tf-1,3)
GWI_inst_S1moi = np.zeros(matrix_GWI_S1moi)

#GWI at year t = sum over emission years of emission(i) * DCF(t-i)
for t in range(0,tf-1):
    GWI_inst_S1moi[t,0] = np.sum(np.multiply(emission_CO2_S1moi,DCF_CO2_ti[:,t]))
    GWI_inst_S1moi[t,1] = np.sum(np.multiply(emission_CH4_S1moi,DCF_CH4_ti[:,t]))
    GWI_inst_S1moi[t,2] = np.sum(np.multiply(emission_CO2_seq_S1moi,DCF_CO2_ti[:,t]))

matrix_GWI_tot_S1moi = (tf-1,1)
GWI_inst_tot_S1moi = np.zeros(matrix_GWI_tot_S1moi)

#net GWI = emissions + CH4 + (negative) sequestration
GWI_inst_tot_S1moi[:,0] = np.array(GWI_inst_S1moi[:,0] + GWI_inst_S1moi[:,1] + GWI_inst_S1moi[:,2])

print(GWI_inst_tot_S1moi[:,0])

#(the script continues with the analogous S1_dry/E calculations below)
t = np.arange(0,tf-1,1)
import os
import cv2
import json
import torch
import argparse
import numpy as np
from db import Test_Transform, MVTEC
from factory import load_test_model_from_factory
from tools import *
def _str2bool(value):
    """Parse a command-line boolean: 'true/1/yes/y/t' (case-insensitive) -> True."""
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y', 't')

def parse_args():
    """Parse command-line arguments for the evaluation script."""
    parser = argparse.ArgumentParser(description='Unsupervised defect segmentaion base on auto-encoder.')
    parser.add_argument('--cfg', help="Path of config file", type=str, required=True)
    parser.add_argument('--model_path', help="Path of model", type=str, required=True)
    parser.add_argument('--gpu_id', help="ID of GPU", type=int, default=0)
    parser.add_argument('--res_dir', help="Directory path of result", type=str, default='./eval_result')
    # type=bool is a known argparse pitfall: bool('False') is True, so
    # '--retest False' would silently enable retest. Parse explicitly.
    parser.add_argument('--retest', default=False, type=_str2bool)
    return parser.parse_args()
def load_params(net, path):
    """Load a checkpoint into `net`, stripping any 'module.' prefix that
    torch.nn.DataParallel adds to parameter names.

    Args:
        net: model whose state dict is overwritten in place.
        path: checkpoint file produced by torch.save().
    """
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    # map_location='cpu' lets GPU-saved checkpoints load on CPU-only hosts;
    # the caller moves the model to the target device afterwards.
    w_dict = torch.load(path, map_location='cpu')
    for k, v in w_dict.items():
        if k.startswith('module.'):
            name = k[len('module.'):]  # remove the DataParallel `module.` prefix
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
if __name__ == '__main__':
    args = parse_args()

    # load config file ./config/<cfg>.json
    cfg_file = os.path.join('./config', args.cfg + '.json')
    with open(cfg_file, "r") as f:
        configs = json.load(f)

    if not os.path.exists(args.res_dir):
        os.mkdir(args.res_dir)

    # load data set
    test_set = MVTEC(root=configs['db']['data_dir'], set=configs['db']['test_split'], preproc=None)
    transform = Test_Transform()
    print('Data set: {} has been loaded'.format(configs['db']['name']))

    # load model (eval mode, pinned to the requested GPU)
    net = load_test_model_from_factory(configs)
    load_params(net, args.model_path)
    net = net.eval().cuda(args.gpu_id)
    print('Model: {} has been loaded'.format(configs['model']['name']))

    print('Start Testing... ')
    _t = Timer()
    cost_time = list()
    # test_dict layout (from MVTEC): {item: {defect_type: [(image_path, IsTexture), ...]}}
    # — TODO confirm against the MVTEC class.
    for item in test_set.test_dict:
        item_dict = test_set.test_dict[item]
        if not os.path.exists(os.path.join(args.res_dir, item)):
            os.mkdir(os.path.join(args.res_dir, item))
        for type in item_dict:
            if not os.path.exists(os.path.join(args.res_dir, item, type)):
                os.mkdir(os.path.join(args.res_dir, item, type))
            _time = list()
            img_list = item_dict[type]
            for img_info in img_list:
                path = img_info[0]
                IsTexture = img_info[1]
                image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                img_id = path.split('.')[0][-3:]  # last 3 chars of the stem, e.g. '000'
                _t.tic()
                ori_img, input_tensor = transform(image, IsTexture)
                with torch.no_grad():
                    input_tensor = input_tensor.cuda(args.gpu_id)
                    re_img = net(input_tensor)
                inference_time = _t.toc()
                _time.append(inference_time)

                # fetech from GPU
                re_img = torch.squeeze(re_img)
                re_img = re_img.cpu().numpy()
                # re_img = re_img.transpose((1, 2, 0))

                # projected to Grayscale image (model output presumably in [0, 1])
                re_img = re_img * 255
                re_img = re_img.astype(np.uint8)

                # save rebuilt image
                # Texture images arrive as four 128x128 crops; stitch the four
                # reconstructions back into one 256x256 image.
                if IsTexture is True:
                    con_img = np.zeros([256, 256], dtype=np.uint8)
                    con_img[0:128, 0:128] = re_img[0]
                    con_img[128:256, 0:128] = re_img[1]
                    con_img[0:128, 128:256] = re_img[2]
                    con_img[128:256, 128:256] = re_img[3]
                    cv2.imwrite(os.path.join(args.res_dir, item, type, 're_{}.png'.format(img_id)), con_img)
                else:
                    cv2.imwrite(os.path.join(args.res_dir, item, type, 're_{}.png'.format(img_id)), re_img)
            cost_time += _time
            mean_time = np.array(_time).mean()
            print('Evaluate: Item:{}; Type:{}; Mean time:{:.1f}ms'.format(item, type, mean_time * 1000))
            _t.clear()

    # calculate mean time
    cost_time = np.array(cost_time)
import flask_sqlalchemy
import numpy as np
db = flask_sqlalchemy.SQLAlchemy()
class PlayersTeams(db.Model):
    """Association table linking players to teams, one row per season."""
    __tablename__ = "players_teams"
    # composite primary key: (player, team, season)
    player_id = db.Column(db.String(20), db.ForeignKey("player.id"), primary_key=True)
    team_id = db.Column(db.String(20), db.ForeignKey("team.id"), primary_key=True)
    season = db.Column(db.Integer, primary_key=True)
    @staticmethod
    def mapper(csv_data):
        """Map one CSV row (dict-like) to column keyword arguments.

        Falsy IDs (empty string / 0) become None; SEASON is NaN-checked —
        NOTE(review): presumably the rows come from pandas, where missing
        numerics are NaN; confirm against the loader.
        """
        return {
            "team_id": str(csv_data["TEAM_ID"]) if csv_data["TEAM_ID"] else None,
            "player_id": str(csv_data["PLAYER_ID"]) if csv_data["PLAYER_ID"] else None,
            "season": int(csv_data["SEASON"])
            if not np.isnan(csv_data["SEASON"])
            else None,
        }
class League(db.Model):
    """A league; parent of teams and rankings."""
    __tablename__ = "league"
    id = db.Column(db.String(20), primary_key=True)
    teams = db.relationship("Team")
    rankings = db.relationship("Ranking")
    def __repr__(self):
        return f"<League {self.id}>"
    def to_json(self, expand=False):
        """Serialize to a minimal JSON-compatible dict (id only).

        NOTE(review): `expand` is currently unused — confirm whether nested
        serialization was intended.
        """
        return {
            "id": self.id,
        }
    @staticmethod
    def mapper(csv_data):
        """Map one CSV row (dict-like) to League column keyword arguments."""
        return {"id": str(csv_data["LEAGUE_ID"])}
class Team(db.Model):
    """A team, with franchise metadata and links to players and games."""
    __tablename__ = "team"
    id = db.Column(db.String(20), primary_key=True)
    min_year = db.Column(db.Integer)
    max_year = db.Column(db.Integer)
    abbreviation = db.Column(db.String(10))
    nickname = db.Column(db.String(100))
    yearfounded = db.Column(db.Integer)
    city = db.Column(db.String(100))
    arena = db.Column(db.String(100))
    arenacapacity = db.Column(db.Integer)
    owner = db.Column(db.String(100))
    generalmanager = db.Column(db.String(100))
    headcoach = db.Column(db.String(100))
    dleagueaffiliation = db.Column(db.String(100))
    league_id = db.Column(db.String(20), db.ForeignKey("league.id"), default="0")
    # many-to-many through the players_teams association table
    players = db.relationship(
        "Player", secondary="players_teams", back_populates="teams"
    )
    # a team appears in a game as either home or away; both sides are
    # lazy="dynamic" queries so they can be combined in the `games` property
    home_games = db.relationship(
        "Game", foreign_keys="Game.home_team_id", backref="home_team", lazy="dynamic"
    )
    away_games = db.relationship(
        "Game", foreign_keys="Game.away_team_id", backref="away_team", lazy="dynamic"
    )
    game_details = db.relationship("GameDetail", backref="team", lazy=True)
    def __repr__(self):
        return f"<Team {self.id}>"
    def to_json(self):
        """Serialize to a minimal JSON-compatible dict (id only)."""
        return {
            "id": self.id,
        }
    @property
    def games(self):
        """All games this team took part in (home and away combined)."""
        return self.home_games.union(self.away_games)
    @staticmethod
    def mapper(csv_data):
        """Map one CSV row (dict-like) to Team column keyword arguments.

        Numeric fields are NaN-checked, string fields use plain truthiness
        so empty strings become None. NOTE(review): if the rows come from
        pandas, missing *string* cells are float NaN, which is truthy and
        would be stored as the literal "nan" — confirm against the loader.
        """
        return {
            "league_id": str(csv_data["LEAGUE_ID"]) if csv_data["LEAGUE_ID"] else None,
            "id": str(csv_data["TEAM_ID"]) if csv_data["TEAM_ID"] else None,
            "min_year": int(csv_data["MIN_YEAR"])
            if not np.isnan(csv_data["MIN_YEAR"])
            else None,
            "max_year": int(csv_data["MAX_YEAR"])
            if not np.isnan(csv_data["MAX_YEAR"])
            else None,
            "abbreviation": str(csv_data["ABBREVIATION"])
            if csv_data["ABBREVIATION"]
            else None,
            "nickname": str(csv_data["NICKNAME"]) if csv_data["NICKNAME"] else None,
            "yearfounded": int(csv_data["YEARFOUNDED"])
            if not np.isnan(csv_data["YEARFOUNDED"])
            else None,
            "city": str(csv_data["CITY"]) if csv_data["CITY"] else None,
            "arena": str(csv_data["ARENA"]) if csv_data["ARENA"] else None,
            "arenacapacity": int(csv_data["ARENACAPACITY"])
            if not np.isnan(csv_data["ARENACAPACITY"])
            else None,
            "owner": str(csv_data["OWNER"]) if csv_data["OWNER"] else None,
            "generalmanager": str(csv_data["GENERALMANAGER"])
            if csv_data["GENERALMANAGER"]
            else None,
            "headcoach": str(csv_data["HEADCOACH"]) if csv_data["HEADCOACH"] else None,
            "dleagueaffiliation": str(csv_data["DLEAGUEAFFILIATION"])
            if csv_data["DLEAGUEAFFILIATION"]
            else None,
        }
class Player(db.Model):
    """A player; linked to teams via the players_teams association table."""
    __tablename__ = "player"
    id = db.Column(db.String(20), primary_key=True)
    player_name = db.Column(db.String(100))
    game_details = db.relationship("GameDetail", backref="player", lazy=True)
    teams = db.relationship("Team", secondary="players_teams", back_populates="players")
    def __repr__(self):
        return f"<Player {self.id}>"
    def to_json(self):
        """Serialize to a minimal JSON-compatible dict (id only)."""
        return {"id": self.id}
    @staticmethod
    def mapper(csv_data):
        """Map one CSV row (dict-like) to Player column keyword arguments.

        Falsy fields (empty string / 0) are stored as None.
        """
        return {
            "player_name": str(csv_data["PLAYER_NAME"])
            if csv_data["PLAYER_NAME"]
            else None,
            "id": str(csv_data["PLAYER_ID"]) if csv_data["PLAYER_ID"] else None,
        }
class Game(db.Model):
__tablename__ = "game"
id = db.Column(db.String(20), primary_key=True)
game_date_est = db.Column(db.DateTime)
game_status_text = db.Column(db.String(50))
season = db.Column(db.Integer)
pts_home = db.Column(db.Integer)
fg_pct_home = db.Column(db.Float)
ft_pct_home = db.Column(db.Float)
fg3_pct_home = db.Column(db.Float)
ast_home = db.Column(db.Integer)
reb_home = db.Column(db.Integer)
pts_away = db.Column(db.Integer)
fg_pct_away = db.Column(db.Float)
ft_pct_away = db.Column(db.Float)
fg3_pct_away = db.Column(db.Float)
ast_away = db.Column(db.Integer)
reb_away = db.Column(db.Integer)
home_team_wins = db.Column(db.Integer)
home_team_id = db.Column(db.String(20), db.ForeignKey("team.id"))
away_team_id = db.Column(db.String(20), db.ForeignKey("team.id"))
game_details = db.relationship("GameDetail", backref="game", lazy=True)
def __repr__(self):
return f"<Game {self.id}>"
def to_json(self):
return {"id": self.id}
@staticmethod
def mapper(csv_data):
return {
"game_date_est": csv_data["GAME_DATE_EST"]
if csv_data["GAME_DATE_EST"]
else None,
"id": str(csv_data["GAME_ID"]) if csv_data["GAME_ID"] else None,
"game_status_text": str(csv_data["GAME_STATUS_TEXT"])
if csv_data["GAME_STATUS_TEXT"]
else None,
"season": int(csv_data["SEASON"])
if not np.isnan(csv_data["SEASON"])
else None,
"home_team_id": str(csv_data["TEAM_ID_home"])
if csv_data["TEAM_ID_home"]
else None,
"pts_home": int(csv_data["PTS_home"])
if not np.isnan(csv_data["PTS_home"])
else None,
"fg_pct_home": float(csv_data["FG_PCT_home"])
if not | np.isnan(csv_data["FG_PCT_home"]) | numpy.isnan |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by <NAME> (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
from distutils.util import strtobool
import logging
import math
import os
import sys
import numpy as np
import torch
import torch.multiprocessing as mp
from utils import find_files, read_hdf5, read_txt, write_hdf5
from gru_vae import GRU_RNN, sampling_vae_batch
from dtw_c import dtw_c as dtw
np.set_printoptions(threshold=np.inf)
def main():
parser = argparse.ArgumentParser()
# decode setting
parser.add_argument("--feats", required=True,
type=str, help="list or directory of source eval feat files")
parser.add_argument("--feats_trg", required=True,
type=str, help="list or directory of source eval feat files")
parser.add_argument("--stats_src", required=True,
type=str, help="hdf5 file including source statistics")
parser.add_argument("--stats_trg", required=True,
type=str, help="hdf5 file including target statistics")
parser.add_argument("--stats_jnt",
type=str, help="hdf5 file including target statistics")
parser.add_argument("--model", required=True,
type=str, help="model file")
parser.add_argument("--config", required=True,
type=str, help="configure file")
parser.add_argument("--n_gpus", default=1,
type=int, help="number of gpus")
parser.add_argument("--n_smpl_dec", default=300,
type=int, help="number of gpus")
parser.add_argument("--outdir", required=True,
type=str, help="directory to save generated samples")
parser.add_argument("--write_gv", default=False,
type=strtobool, help="flag to write gv stats")
# other setting
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--GPU_device", default=0,
type=int, help="selection of GPU device")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_device)
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# set log level
if args.verbose > 0:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# load config
config = torch.load(args.config)
# get source feat list
if os.path.isdir(args.feats):
feat_list = sorted(find_files(args.feats, "*.h5"))
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
# get target feat list
if os.path.isdir(args.feats_trg):
feat_trg_list = sorted(find_files(args.feats_trg, "*.h5"))
elif os.path.isfile(args.feats_trg):
feat_trg_list = read_txt(args.feats_trg)
else:
logging.error("--feats_trg should be directory or list.")
sys.exit(1)
# prepare the file list for parallel decoding
feat_lists = np.array_split(feat_list, args.n_gpus)
feat_lists = [f_list.tolist() for f_list in feat_lists]
feat_trg_lists = np.array_split(feat_trg_list, args.n_gpus)
feat_trg_lists = [f_list.tolist() for f_list in feat_trg_lists]
spk_src = os.path.basename(os.path.dirname(feat_lists[0][0]))
spk_trg = os.path.basename(os.path.dirname(feat_trg_lists[0][0]))
gv_mean_src = read_hdf5(args.stats_src, "/gv_range_mean")[1:]
gv_mean_trg = read_hdf5(args.stats_trg, "/gv_range_mean")[1:]
# define gpu decode function
def gpu_decode(feat_list, feat_trg_list, gpu, cvlist=None, mcdlist=None, mcdstdlist=None, mcdpowlist=None, mcdpowstdlist=None, cvlist_src=None, mcdlist_src=None, mcdstdlist_src=None, mcdpowlist_src=None, mcdpowstdlist_src=None, cvlist_trg=None, mcdlist_trg=None, mcdstdlist_trg=None, mcdpowlist_trg=None, mcdpowstdlist_trg=None, lat_dist_rmse_enc_list=None, lat_dist_cosim_enc_list=None, lat_dist_rmse_pri_list=None, lat_dist_cosim_pri_list=None):
with torch.cuda.device(gpu):
mean_jnt = torch.FloatTensor(read_hdf5(args.stats_jnt, "/mean_feat_org_lf0_jnt")[config.stdim:]).cuda()
std_jnt = torch.FloatTensor(read_hdf5(args.stats_jnt, "/scale_feat_org_lf0_jnt")[config.stdim:]).cuda()
# define model and load parameters
logging.info("model")
logging.info(config)
with torch.no_grad():
model_encoder = GRU_RNN(
in_dim=config.in_dim,
out_dim=config.lat_dim*2,
hidden_layers=config.hidden_layers,
hidden_units=config.hidden_units,
kernel_size=config.kernel_size,
dilation_size=config.dilation_size,
scale_out_flag=False)
model_decoder = GRU_RNN(
in_dim=config.lat_dim+2,
out_dim=config.out_dim,
hidden_layers=config.hidden_layers,
hidden_units=config.hidden_units,
kernel_size=config.kernel_size,
dilation_size=config.dilation_size,
scale_in_flag=False)
model_encoder.load_state_dict(torch.load(args.model)["model_encoder"])
model_decoder.load_state_dict(torch.load(args.model)["model_decoder"])
model_encoder.cuda()
model_decoder.cuda()
model_encoder.eval()
model_decoder.eval()
for param in model_encoder.parameters():
param.requires_grad = False
for param in model_decoder.parameters():
param.requires_grad = False
logging.info(model_encoder)
logging.info(model_decoder)
init_pp = np.zeros((1,1,config.lat_dim*2))
y_in_pp = torch.FloatTensor(init_pp).cuda()
y_in_src = y_in_trg = torch.unsqueeze(torch.unsqueeze((0-mean_jnt)/std_jnt,0),0)
for feat_file, feat_trg_file in zip(feat_list, feat_trg_list):
# convert mcep
logging.info("cvmcep " + feat_file + " " + feat_trg_file)
feat = read_hdf5(feat_file, "/feat_org_lf0")
feat_trg = read_hdf5(feat_trg_file, "/feat_org_lf0")
logging.info(feat.shape)
logging.info(feat_trg.shape)
with torch.no_grad():
lat_src, _, _ = model_encoder(torch.FloatTensor(feat).cuda(), y_in_pp, clamp_vae=True, lat_dim=config.lat_dim)
lat_feat = sampling_vae_batch(lat_src.unsqueeze(0).repeat(args.n_smpl_dec,1,1), lat_dim=config.lat_dim)
lat_feat = torch.mean(lat_feat, 0)
lat_trg, _, _ = model_encoder(torch.FloatTensor(feat_trg).cuda(), y_in_pp, clamp_vae=True, lat_dim=config.lat_dim)
lat_feat_trg = sampling_vae_batch(lat_trg.unsqueeze(0).repeat(args.n_smpl_dec,1,1), lat_dim=config.lat_dim)
lat_feat_trg = torch.mean(lat_feat_trg, 0)
src_code = np.zeros((lat_feat.shape[0],2))
trg_code = np.zeros((lat_feat.shape[0],2))
trg_trg_code = np.zeros((lat_feat_trg.shape[0],2))
src_code[:,0] = 1
trg_code[:,1] = 1
trg_trg_code[:,1] = 1
src_code = torch.FloatTensor(src_code).cuda()
trg_code = torch.FloatTensor(trg_code).cuda()
trg_trg_code = torch.FloatTensor(trg_trg_code).cuda()
cvmcep, _, _ = model_decoder(torch.cat((trg_code, lat_feat),1), y_in_trg)
cvmcep = np.array(cvmcep.cpu().data.numpy(), dtype=np.float64)
cvmcep_src, _, _ = model_decoder(torch.cat((src_code, lat_feat),1), y_in_src)
cvmcep_src = np.array(cvmcep_src.cpu().data.numpy(), dtype=np.float64)
cvmcep_trg, _, _ = model_decoder(torch.cat((trg_trg_code, lat_feat_trg),1), y_in_trg)
cvmcep_trg = np.array(cvmcep_trg.cpu().data.numpy(), dtype=np.float64)
logging.info(cvmcep.shape)
logging.info(cvmcep_trg.shape)
cvlist.append(np.var(cvmcep[:,1:], axis=0))
cvlist_src.append(np.var(cvmcep_src[:,1:], axis=0))
cvlist_trg.append(np.var(cvmcep_trg[:,1:], axis=0))
logging.info(len(cvlist))
spcidx_src = read_hdf5(feat_file, "/spcidx_range")[0]
mcep_trg = read_hdf5(feat_trg_file, "/mcepspc_range")
_, _, _, mcdpow_arr = dtw.dtw_org_to_trg(np.array(cvmcep[np.array(spcidx_src),:], dtype=np.float64), np.array(mcep_trg[:,:], dtype=np.float64))
_, _, _, mcd_arr = dtw.dtw_org_to_trg(np.array(cvmcep[np.array(spcidx_src),1:], dtype=np.float64), np.array(mcep_trg[:,1:], dtype=np.float64))
mcdpow_mean = np.mean(mcdpow_arr)
mcdpow_std = np.std(mcdpow_arr)
mcd_mean = np.mean(mcd_arr)
mcd_std = np.std(mcd_arr)
logging.info("mcdpow: %.6f dB +- %.6f" % (mcdpow_mean, mcdpow_std))
logging.info("mcd: %.6f dB +- %.6f" % (mcd_mean, mcd_std))
mcdpowlist.append(mcdpow_mean)
mcdpowstdlist.append(mcdpow_std)
mcdlist.append(mcd_mean)
mcdstdlist.append(mcd_std)
mcep_src = read_hdf5(feat_file, "/mcepspc_range")
_, mcdpow_arr = dtw.calc_mcd(np.array(mcep_src[:,:], dtype=np.float64), np.array(cvmcep_src[np.array(spcidx_src),:], dtype=np.float64))
_, mcd_arr = dtw.calc_mcd(np.array(mcep_src[:,1:], dtype=np.float64), np.array(cvmcep_src[np.array(spcidx_src),1:], dtype=np.float64))
mcdpow_mean = np.mean(mcdpow_arr)
mcdpow_std = np.std(mcdpow_arr)
mcd_mean = np.mean(mcd_arr)
mcd_std = np.std(mcd_arr)
logging.info("mcdpow_src: %.6f dB +- %.6f" % (mcdpow_mean, mcdpow_std))
logging.info("mcd_src: %.6f dB +- %.6f" % (mcd_mean, mcd_std))
mcdpowlist_src.append(mcdpow_mean)
mcdpowstdlist_src.append(mcdpow_std)
mcdlist_src.append(mcd_mean)
mcdstdlist_src.append(mcd_std)
spcidx_trg = read_hdf5(feat_trg_file, "/spcidx_range")[0]
_, mcdpow_arr = dtw.calc_mcd(np.array(mcep_trg[:,:], dtype=np.float64), np.array(cvmcep_trg[np.array(spcidx_trg),:], dtype=np.float64))
_, mcd_arr = dtw.calc_mcd(np.array(mcep_trg[:,1:], dtype=np.float64), np.array(cvmcep_trg[np.array(spcidx_trg),1:], dtype=np.float64))
mcdpow_mean = np.mean(mcdpow_arr)
mcdpow_std = np.std(mcdpow_arr)
mcd_mean = np.mean(mcd_arr)
mcd_std = np.std(mcd_arr)
logging.info("mcdpow_trg: %.6f dB +- %.6f" % (mcdpow_mean, mcdpow_std))
logging.info("mcd_trg: %.6f dB +- %.6f" % (mcd_mean, mcd_std))
mcdpowlist_trg.append(mcdpow_mean)
mcdpowstdlist_trg.append(mcdpow_std)
mcdlist_trg.append(mcd_mean)
mcdstdlist_trg.append(mcd_std)
with torch.no_grad():
spcidx_src = torch.LongTensor(spcidx_src).cuda()
spcidx_trg = torch.LongTensor(spcidx_trg).cuda()
trj_lat_src = np.array(torch.index_select(lat_src,0,spcidx_src).cpu().data.numpy(), dtype=np.float64)
trj_lat_trg = np.array(torch.index_select(lat_trg,0,spcidx_trg).cpu().data.numpy(), dtype=np.float64)
aligned_lat_srctrg, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg)
lat_dist_srctrg = np.mean(np.sqrt(np.mean((aligned_lat_srctrg-trj_lat_trg)**2, axis=0)))
_, _, lat_cdist_srctrg, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src, mcd=0)
aligned_lat_trgsrc, _, _, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src)
lat_dist_trgsrc = np.mean(np.sqrt(np.mean((aligned_lat_trgsrc-trj_lat_src)**2, axis=0)))
_, _, lat_cdist_trgsrc, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg, mcd=0)
logging.info("%lf %lf %lf %lf" % (lat_dist_srctrg, lat_cdist_srctrg, lat_dist_trgsrc, lat_cdist_trgsrc))
lat_dist_rmse = (lat_dist_srctrg+lat_dist_trgsrc)/2
lat_dist_cosim = (lat_cdist_srctrg+lat_cdist_trgsrc)/2
lat_dist_rmse_enc_list.append(lat_dist_rmse)
lat_dist_cosim_enc_list.append(lat_dist_cosim)
logging.info("lat_dist_enc: %.6f %.6f" % (lat_dist_rmse, lat_dist_cosim))
trj_lat_src = np.array(torch.index_select(lat_feat,0,spcidx_src).cpu().data.numpy(), dtype=np.float64)
trj_lat_trg = np.array(torch.index_select(lat_feat_trg,0,spcidx_trg).cpu().data.numpy(), dtype=np.float64)
aligned_lat_srctrg, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg)
lat_dist_srctrg = np.mean(np.sqrt(np.mean((aligned_lat_srctrg-trj_lat_trg)**2, axis=0)))
_, _, lat_cdist_srctrg, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src, mcd=0)
aligned_lat_trgsrc, _, _, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src)
lat_dist_trgsrc = np.mean(np.sqrt(np.mean((aligned_lat_trgsrc-trj_lat_src)**2, axis=0)))
_, _, lat_cdist_trgsrc, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg, mcd=0)
logging.info("%lf %lf %lf %lf" % (lat_dist_srctrg, lat_cdist_srctrg, lat_dist_trgsrc, lat_cdist_trgsrc))
lat_dist_rmse = (lat_dist_srctrg+lat_dist_trgsrc)/2
lat_dist_cosim = (lat_cdist_srctrg+lat_cdist_trgsrc)/2
lat_dist_rmse_pri_list.append(lat_dist_rmse)
lat_dist_cosim_pri_list.append(lat_dist_cosim)
logging.info("lat_dist_pri: %.6f %.6f" % (lat_dist_rmse, lat_dist_cosim))
# parallel decode training
with mp.Manager() as manager:
gpu = 0
processes = []
cvlist = manager.list()
mcdlist = manager.list()
mcdstdlist = manager.list()
mcdpowlist = manager.list()
mcdpowstdlist = manager.list()
cvlist_src = manager.list()
mcdlist_src = manager.list()
mcdstdlist_src = manager.list()
mcdpowlist_src = manager.list()
mcdpowstdlist_src = manager.list()
cvlist_trg = manager.list()
mcdlist_trg = manager.list()
mcdstdlist_trg = manager.list()
mcdpowlist_trg = manager.list()
mcdpowstdlist_trg = manager.list()
lat_dist_rmse_enc_list = manager.list()
lat_dist_cosim_enc_list = manager.list()
lat_dist_rmse_pri_list = manager.list()
lat_dist_cosim_pri_list = manager.list()
for i, (feat_list, feat_trg_list) in enumerate(zip(feat_lists, feat_trg_lists)):
logging.info(i)
p = mp.Process(target=gpu_decode, args=(feat_list, feat_trg_list, gpu, cvlist, mcdlist, mcdstdlist, mcdpowlist, mcdpowstdlist, cvlist_src, mcdlist_src, mcdstdlist_src, mcdpowlist_src, mcdpowstdlist_src, cvlist_trg, mcdlist_trg, mcdstdlist_trg, mcdpowlist_trg, mcdpowstdlist_trg, lat_dist_rmse_enc_list, lat_dist_cosim_enc_list, lat_dist_rmse_pri_list, lat_dist_cosim_pri_list,))
p.start()
processes.append(p)
gpu += 1
if (i + 1) % args.n_gpus == 0:
gpu = 0
# wait for all process
for p in processes:
p.join()
# calculate cv_gv statistics
cvgv_mean = np.mean(np.array(cvlist), axis=0)
cvgv_var = np.var(np.array(cvlist), axis=0)
cvgvsrc_mean = np.mean(np.array(cvlist_src), axis=0)
cvgvsrc_var = np.var(np.array(cvlist_src), axis=0)
cvgvtrg_mean = np.mean(np.array(cvlist_trg), axis=0)
cvgvtrg_var = np.var(np.array(cvlist_trg), axis=0)
logging.info(args.stats_src)
logging.info(args.stats_trg)
#logging.info(gv_mean_trg)
logging.info("mcdpow: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdpowlist)),np.std(np.array(mcdpowlist)),np.mean(np.array(mcdpowstdlist)),np.std(np.array(mcdpowstdlist))))
logging.info("mcd: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdlist)),np.std(np.array(mcdlist)),np.mean(np.array(mcdstdlist)),np.std(np.array(mcdstdlist))))
#logging.info(cvgv_mean)
logging.info("%lf +- %lf" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean_trg)))), np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean_trg))))))
logging.info("mcdpow_src: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdpowlist_src)),np.std( | np.array(mcdpowlist_src) | numpy.array |
import numpy as np
import math
import random
from batchgenerators.transforms import AbstractTransform
import sys
sys.path.append(".")
from kits19cnn.io.custom_augmentations import foreground_crop, center_crop, \
random_resized_crop
class RandomResizedCropTransform(AbstractTransform):
"""
Crop the given array to random size and aspect ratio.
Doesn't resize across the depth dimenion (assumes it is dim=0) if
the data is 3D.
A crop of random size (default: of 0.08 to 1.0) of the original size and a
random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio
is made. This crop is finally resized to given size.
This is popularly used to train the Inception networks.
Assumes the data and segmentation masks are the same size.
"""
def __init__(self, target_size, scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
data_key="data", label_key="seg", p_per_sample=0.33,
crop_kwargs={}, resize_kwargs={}):
"""
Attributes:
pass
"""
if len(target_size) > 2:
print("Currently only adjusts the aspect ratio for the 2D dims.")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.target_size = target_size
self.scale = scale
self.ratio = ratio
self.data_key = data_key
self.label_key = label_key
self.p_per_sample = p_per_sample
self.crop_kwargs = crop_kwargs
self.resize_kwargs = resize_kwargs
def _get_image_size(self, data):
"""
Assumes data has shape (b, c, h, w (, d)). Fetches the h, w, and d.
depth if applicable.
"""
return data.shape[2:]
def get_crop_size(self, data, scale, ratio):
"""
Get parameters for ``crop`` for a random sized crop.
"""
shape_dims = self._get_image_size(data)
area = np.prod(shape_dims)
while True:
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if len(shape_dims) == 3:
depth = shape_dims[0]
crop_size = | np.array([depth, h, w]) | numpy.array |
import numpy as np
import random
import pyflex
from softgym.envs.cloth_env import ClothEnv
from copy import deepcopy
from softgym.utils.misc import vectorized_range, vectorized_meshgrid
from softgym.utils.pyflex_utils import center_object
class ClothFlattenEnv(ClothEnv):
    def __init__(self, cached_states_path='cloth_flatten_init_states.pkl', **kwargs):
        """
        :param cached_states_path: path of the pickled cached initial states
        :param num_picker: Number of pickers if the action_mode is picker
        :param kwargs: forwarded to ClothEnv
        """
        super().__init__(**kwargs)
        # loads (or generates) the cached initial configurations/states
        self.get_cached_configs_and_states(cached_states_path, self.num_variations)
        self.prev_covered_area = None  # Should not be used until initialized
def generate_env_variation(self, num_variations=1, vary_cloth_size=True):
""" Generate initial states. Note: This will also change the current states! """
max_wait_step = 300 # Maximum number of steps waiting for the cloth to stablize
stable_vel_threshold = 0.01 # Cloth stable when all particles' vel are smaller than this
generated_configs, generated_states = [], []
default_config = self.get_default_config()
for i in range(num_variations):
config = deepcopy(default_config)
self.update_camera(config['camera_name'], config['camera_params'][config['camera_name']])
if vary_cloth_size:
cloth_dimx, cloth_dimy = self._sample_cloth_size()
config['ClothSize'] = [cloth_dimx, cloth_dimy]
else:
cloth_dimx, cloth_dimy = config['ClothSize']
self.set_scene(config)
self.action_tool.reset([0., -1., 0.])
pos = pyflex.get_positions().reshape(-1, 4)
pos[:, :3] -= np.mean(pos, axis=0)[:3]
if self.action_mode in ['sawyer', 'franka']: # Take care of the table in robot case
pos[:, 1] = 0.57
else:
pos[:, 1] = 0.005
pos[:, 3] = 1
pyflex.set_positions(pos.flatten())
pyflex.set_velocities(np.zeros_like(pos))
pyflex.step()
num_particle = cloth_dimx * cloth_dimy
pickpoint = random.randint(0, num_particle - 1)
curr_pos = pyflex.get_positions()
original_inv_mass = curr_pos[pickpoint * 4 + 3]
curr_pos[pickpoint * 4 + 3] = 0 # Set the mass of the pickup point to infinity so that it generates enough force to the rest of the cloth
pickpoint_pos = curr_pos[pickpoint * 4: pickpoint * 4 + 3].copy() # Pos of the pickup point is fixed to this point
pickpoint_pos[1] += np.random.random(1) * 0.5 + 0.5
pyflex.set_positions(curr_pos)
# Pick up the cloth and wait to stablize
for j in range(0, max_wait_step):
curr_pos = pyflex.get_positions()
curr_vel = pyflex.get_velocities()
curr_pos[pickpoint * 4: pickpoint * 4 + 3] = pickpoint_pos
curr_vel[pickpoint * 3: pickpoint * 3 + 3] = [0, 0, 0]
pyflex.set_positions(curr_pos)
pyflex.set_velocities(curr_vel)
pyflex.step()
if np.alltrue(np.abs(curr_vel) < stable_vel_threshold) and j > 5:
break
# Drop the cloth and wait to stablize
curr_pos = pyflex.get_positions()
curr_pos[pickpoint * 4 + 3] = original_inv_mass
pyflex.set_positions(curr_pos)
for _ in range(max_wait_step):
pyflex.step()
curr_vel = pyflex.get_velocities()
if np.alltrue(curr_vel < stable_vel_threshold):
break
center_object()
if self.action_mode == 'sphere' or self.action_mode.startswith('picker'):
curr_pos = pyflex.get_positions()
self.action_tool.reset(curr_pos[pickpoint * 4:pickpoint * 4 + 3] + [0., 0.2, 0.])
generated_configs.append(deepcopy(config))
generated_states.append(deepcopy(self.get_state()))
self.current_config = config # Needed in _set_to_flatten function
generated_configs[-1]['flatten_area'] = self._set_to_flatten() # Record the maximum flatten area
print('config {}: camera params {}, flatten area: {}'.format(i, config['camera_params'], generated_configs[-1]['flatten_area']))
return generated_configs, generated_states
def _set_to_flatten(self):
# self._get_current_covered_area(pyflex.get_positions().reshape(-))
cloth_dimx, cloth_dimz = self.get_current_config()['ClothSize']
N = cloth_dimx * cloth_dimz
px = np.linspace(0, cloth_dimx * self.cloth_particle_radius, cloth_dimx)
py = np.linspace(0, cloth_dimz * self.cloth_particle_radius, cloth_dimz)
xx, yy = np.meshgrid(px, py)
new_pos = np.empty(shape=(N, 4), dtype=np.float)
new_pos[:, 0] = xx.flatten()
new_pos[:, 1] = self.cloth_particle_radius
new_pos[:, 2] = yy.flatten()
new_pos[:, 3] = 1.
new_pos[:, :3] -= np.mean(new_pos[:, :3], axis=0)
pyflex.set_positions(new_pos.flatten())
return self._get_current_covered_area(new_pos)
def _reset(self):
""" Right now only use one initial state"""
self.prev_covered_area = self._get_current_covered_area(pyflex.get_positions())
if hasattr(self, 'action_tool'):
curr_pos = pyflex.get_positions()
cx, cy = self._get_center_point(curr_pos)
self.action_tool.reset([cx, 0.2, cy])
pyflex.step()
self.init_covered_area = None
info = self._get_info()
self.init_covered_area = info['performance']
return self._get_obs()
def _step(self, action):
self.action_tool.step(action)
if self.action_mode in ['sawyer', 'franka']:
pyflex.step(self.action_tool.next_action)
else:
pyflex.step()
return
def _get_current_covered_area(self, pos):
"""
Calculate the covered area by taking max x,y cood and min x,y coord, create a discritized grid between the points
:param pos: Current positions of the particle states
"""
pos = np.reshape(pos, [-1, 4])
min_x = | np.min(pos[:, 0]) | numpy.min |
# -*- coding: utf-8 -*-
import numpy as np
def cos_sim(a, b):
    """Cosine similarity of vectors *a* and *b*.

    Computed as a.b / (|a| * |b|). Note: a zero vector yields a division
    by zero (numpy emits a warning and returns nan).
    """
    denominator = np.linalg.norm(a) * np.linalg.norm(b)
    numerator = np.dot(a, b)
    return numerator / denominator
# the counts we computed above
# (repaired: this assignment was garbled in the source)
sentence_m = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 19:31:48 2020
@author: Rushad
"""
import warnings
import numpy as np
from scipy import signal, polyval
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
class TransferFunction():
'''
Define the Transfer Functions in standard form only.
'''
def __init__(self, num_coef, den_coef):
'''
Parameters
----------
num_coef : numpy array OR list
DESCRIPTION. Coefficient of Transfer Function's Numerator
den_coef : TYPE numpy array OR list
DESCRIPTION. Coefficient of Transfer Function's Denominator
Returns
-------
None.
'''
self.num_coef = np.array(num_coef)
self.den_coef = np.array(den_coef)
self.num_coef = self.num_coef.reshape([len(self.num_coef), 1])
self.den_coef = self.den_coef.reshape([len(self.den_coef), 1])
self.order = max(len(self.num_coef), len(self.den_coef)) - 1
    def display(self):
        '''
        Displays TF block: pretty-prints the numerator polynomial over a
        dashed dividing line over the denominator polynomial. Zero
        coefficients are skipped; unit coefficients print without the
        leading "1.0*".
        '''
        # build the numerator string term by term (highest power first)
        num_str = ""
        for n in range(len(self.num_coef)):
            if n < len(self.num_coef)-1: #if not last
                if n != len(self.num_coef)-2: #if not second last
                    if self.num_coef[n] != 1 and self.num_coef[n] != 0: #if coef is not zero and one
                        num_str = num_str + str(float(self.num_coef[n])) + "*S^" + str(abs(n-len(self.num_coef)+1)) + " + "
                    elif self.num_coef[n] == 1: #if coef is one
                        num_str = num_str + "S^" + str(abs(n-len(self.num_coef)+1)) + " + "
                    elif self.num_coef[n] == 0: #if coef is zero
                        pass
                else: #if second last
                    if self.num_coef[n] != 1 and self.num_coef[n] != 0: #if coef is not zero and one
                        num_str = num_str + str(float(self.num_coef[n])) + "*S" + " + "
                    elif self.num_coef[n] == 1: #if coef is one
                        num_str = num_str + "S" + " + "
                    elif self.num_coef[n] == 0: #if coef is zero
                        pass
            else: #if last
                if self.num_coef[n] != 0: #if coef is not zero
                    num_str = num_str + str(float(self.num_coef[n]))
                elif self.num_coef[n] == 0: #if coef is zero
                    # constant term is zero: trim the trailing " + " separator
                    num_str = num_str[:-3]
        # build the denominator string with the same term-by-term rules
        den_str = ""
        for d in range(len(self.den_coef)):
            if d < len(self.den_coef)-1: #if not last
                if d != len(self.den_coef)-2: #if not second last
                    if self.den_coef[d] != 1 and self.den_coef[d] != 0: #if coef not zero and one
                        den_str = den_str + str(float(self.den_coef[d])) + "*S^" + str(abs(d-len(self.den_coef)+1)) + " + "
                    elif self.den_coef[d] == 1: #if coef is one
                        den_str = den_str + "S^" + str(abs(d-len(self.den_coef)+1)) + " + "
                    elif self.den_coef[d] == 0: #if coef is zero
                        pass
                else: #if second last
                    if self.den_coef[d] != 1 and self.den_coef[d] != 0: #if coef is not zero and one
                        den_str = den_str + str(float(self.den_coef[d])) + "*S" + " + "
                    elif self.den_coef[d] == 1: #if coef is one
                        den_str = den_str + "S" + " + "
                    elif self.den_coef[d] == 0: #if coef is zero
                        pass
            else: #if last
                if self.den_coef[d] != 0: #if coef is not zero
                    den_str = den_str + str(float(self.den_coef[d]))
                elif self.den_coef[d] == 0: #if coef is zero
                    # constant term is zero: trim the trailing " + " separator
                    den_str = den_str[:-3]
        # dividing line as wide as the longer of the two polynomial strings
        div_line_len = max(len(num_str), len(den_str))
        div_line = div_line_len*"-"
        tf_disp = str(num_str + " \n" + div_line + " \n" + den_str)
        print(tf_disp)
def parameters(self, settling_time_tolerance=0.02):
'''
Parameters
----------
settling_time_tolerance : float, optional
DESCRIPTION. Tolerance limit for error in settling time. The default is 0.05 (5%)
Returns
-------
parameter : dictionary
DESCRIPTION. Dictionary containing all the parameters/time domain specifications
'''
self.order = max(len(self.num_coef), len(self.den_coef)) - 1
self.settling_time_tolerance = settling_time_tolerance
if self.order == 1:
self.gain = float(self.num_coef[0])
self.time_constant = float(self.den_coef[0])
parameter = {"Order":self.order, "Gain":self.gain, "Time Constant":self.time_constant}
return parameter
elif self.order == 2:
self.gain = float(self.num_coef[0]/self.den_coef[2])
self.natural_frequency = float(np.sqrt(self.den_coef[2]))
self.damping_ratio = float(self.den_coef[1]/(2*self.natural_frequency))
self.damped_freq = self.natural_frequency*np.sqrt(abs(1 - self.damping_ratio**2))
self.phase_angle = float(np.arctan(np.sqrt(np.abs(1 - self.damping_ratio**2))/self.damping_ratio))
self.rise_time = float((np.pi - self.phase_angle)/(self.natural_frequency*np.sqrt(abs(1 - self.damping_ratio**2))))
self.peak_time = float(np.pi/(self.natural_frequency*np.sqrt((abs(1 - self.damping_ratio**2)))))
self.max_overshoot = float(np.exp((-self.damping_ratio*np.pi)/(np.sqrt(abs( 1 - self.damping_ratio**2)))*100))
self.settling_time = float(-np.log(self.settling_time_tolerance*np.sqrt(abs(1 - self.damping_ratio**2)))/(self.damping_ratio*self.natural_frequency))
parameter = {"Order":self.order, "Gain":self.gain,"Natural Frequency":self.natural_frequency, "Damping Frequency":self.damped_freq, "Damping Ratio":self.damping_ratio, "Phase Angle":self.phase_angle, "Rise Time":self.rise_time, "Peak Time":self.peak_time, "Max Overshoot":self.max_overshoot, "Settling Time":self.settling_time}
return parameter
elif self.order > 2:
print("[WARNING] You have inputed a system of Order:" + str(max(len(self.num_coef), len(self.den_coef))-1) + ". Currently supports first and second order systems")
def response(self, input_type, time_period=10, sample_time=0.05, ret=False, show=True):
    '''
    Compute (and optionally plot) the time response of the system.

    Parameters
    ----------
    input_type : string
        Input signal type: "impulse", "step" or "ramp".
    time_period : integer, optional
        The time duration the signal is processed for. The default is 10.
    sample_time : float, optional
        Sample time of the signal. The default is 0.05.
    ret : bool, optional
        Set to True if the system's response is to be returned. The default is False.
    show : bool, optional
        Set to True if the system's response is to be displayed. The default is True.

    Returns
    -------
    resp : numpy array
        Response of the system. Only returned if ret is True.

    Raises
    ------
    KeyError
        If input_type is not one of "impulse", "step" or "ramp".
    '''
    # np.arange already returns an ndarray; no list-comprehension round trip needed
    controller_time = np.arange(0, time_period, sample_time)

    def impulse(self):
        # scipy handles arbitrary proper orders for impulse input
        sys = signal.lti(self.num_coef.reshape(len(self.num_coef)), self.den_coef.reshape(len(self.den_coef)))
        _, resp = signal.impulse(sys, T=controller_time)
        return resp

    def step(self):
        sys = signal.lti(self.num_coef.reshape(len(self.num_coef)), self.den_coef.reshape(len(self.den_coef)))
        _, resp = signal.step(sys, T=controller_time)
        return resp

    def ramp(self):
        # Closed-form ramp responses; only first and second order supported.
        def ramp_order1(self):
            # G(s) = K/(tau*s + 1) driven by r(t) = t gives
            #   y(t) = K*(t - tau + tau*exp(-t/tau)), so y(0) = 0.
            # BUG FIX: the exponential term was missing its tau factor,
            # which made the response start at K*(1 - tau) instead of 0.
            tau = float(self.den_coef[0])
            resp = float(self.num_coef[0])*(controller_time - tau + tau*np.exp(-controller_time/tau))
            return resp
        def ramp_order2(self):
            natural_frequency = float(np.sqrt(self.den_coef[2]))
            damping_ratio = float(self.den_coef[1]/(2*natural_frequency))
            # Underdamped, critically damped and overdamped cases each have
            # their own closed form.
            if 0 <= float(damping_ratio) < 1:
                resp = (1/natural_frequency**2)*((controller_time + (np.exp(-damping_ratio*natural_frequency*controller_time)/natural_frequency)*((2*damping_ratio*np.cos(natural_frequency*np.sqrt(1 - damping_ratio**2)*controller_time)) + (((2*damping_ratio**2 -1)/np.sqrt(1 - damping_ratio**2))*np.sin(natural_frequency*np.sqrt(1 - damping_ratio**2)*controller_time))) - (2*damping_ratio/natural_frequency)))
            elif float(damping_ratio) == 1:
                resp = (1/natural_frequency**2)*(controller_time + ((2*np.exp(-natural_frequency*controller_time))/natural_frequency) + (controller_time*np.exp(-natural_frequency*controller_time)) - (2/natural_frequency))
            elif float(damping_ratio) > 1:
                resp = (1/damping_ratio**2)*(controller_time + (natural_frequency/(2*np.sqrt(np.abs(1 - damping_ratio**2))))*((((1/((damping_ratio*natural_frequency) - np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency))**2)*np.exp(-controller_time/(1/((damping_ratio*natural_frequency) - np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency)))) - (((1/((damping_ratio*natural_frequency) + np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency))**2)*(np.exp(-controller_time/(1/((damping_ratio*natural_frequency) + np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency)))))) - (2*damping_ratio/natural_frequency))
            return resp
        if self.order == 1:
            resp = ramp_order1(self)
            return resp
        elif self.order == 2:
            # Scale the unit-gain closed form by the system gain
            resp = float(self.num_coef[0]/self.den_coef[2])*ramp_order2(self)
            return resp
        elif self.order > 2:
            print("[WARNING] You have inputed a system of Order:" + str(max(len(self.num_coef), len(self.den_coef))-1) + ". Ramp response currently supports first and second order systems")

    # BUG FIX (idiom/safety): dispatch through a dict of callables instead of
    # eval() on code strings; invalid input_type still raises KeyError.
    input_resp = {"impulse": impulse, "step": step, "ramp": ramp}
    resp = input_resp[input_type](self)
    if show:
        plt.plot(controller_time, resp)
        plt.show()
    if ret:
        return resp
def pzplot(self, ret=True):
    '''
    Display the pole-zero map of the system on the complex plane.

    Parameters
    ----------
    ret : bool, optional
        If True (default), return the computed poles and zeros.

    Returns
    -------
    (poles, zeros) : tuple of numpy arrays
        Roots of the denominator and numerator polynomials, cached on the
        instance as self.poles / self.zeros. Only returned when ret is True.
    '''
    # Zeros come from the numerator, poles from the denominator; plot each
    # set with its conventional marker ("o" for zeros, "x" for poles).
    for attr, coefs, marker in (("zeros", self.num_coef, "o"),
                                ("poles", self.den_coef, "x")):
        if len(coefs) >= 1:
            roots = np.roots(coefs.reshape(len(coefs)))
            setattr(self, attr, roots)
            plt.plot(roots.real, roots.imag, marker, label=attr.capitalize())
    plt.xlabel('Re')
    plt.ylabel('Im')
    plt.grid(True, which="both")
    plt.legend()
    plt.show()
    if ret == True:
        return self.poles, self.zeros
def stability(self):
    '''
    Classify BIBO stability of the system from its pole locations.

    Returns
    -------
    state : string
        One of "System is Stable", "System is Marginally Stable" or
        "System is Unstable".
    '''
    # BUG FIX: original tested len(self.den_coef >= 1) — the length of a
    # boolean comparison array — instead of len(self.den_coef) >= 1. The
    # truthiness happened to coincide for non-empty arrays, but the intent
    # is a length check on the coefficient vector.
    if len(self.den_coef) >= 1:
        poles = np.roots(self.den_coef.reshape(len(self.den_coef)))
        # Integer truncation of the imaginary parts: a zero entry marks a
        # pole on (or numerically very near) the real axis.
        poles_round = np.array(poles.imag, dtype="int")
        if (poles.real < 0).all():
            # Every pole strictly in the open left half-plane
            state = "System is Stable"
        elif np.count_nonzero(poles_round) != len(poles_round) and (poles.real <= 0).all():
            # No right-half-plane poles, some on/near the real axis:
            # marginally stable only if no pole is repeated.
            if np.sum(poles) == np.sum(np.unique(poles)):
                state = "System is Marginally Stable"
            else:
                # BUG FIX: message typo ("System in Unstable")
                state = "System is Unstable"
        else:
            state = "System is Unstable"
        return state
def convert2SS(self):
    '''
    Convert this transfer function to a state-space representation.

    Returns
    -------
    StateSpace object
        State-space (A, B, C, D) equivalent of the transfer function,
        also cached on the instance as self.state_space_rep.
    '''
    # Flatten the column coefficient vectors to the 1-D arrays scipy expects
    numerator = self.num_coef.reshape(-1)
    denominator = self.den_coef.reshape(-1)
    a_mat, b_mat, c_mat, d_mat = signal.tf2ss(numerator, denominator)
    self.state_space_rep = StateSpace(a_mat, b_mat, c_mat, d_mat)
    return self.state_space_rep
class feedback(TransferFunction):
    '''
    Add feedback TF to open loop TF. Define in standard form only.
    '''
    def __init__(self, G, H=1.0, feedback_type="negative"):
        '''
        Parameters
        ----------
        G : TransferFunction object
            DESCRIPTION. TF the feedback is to be implemented on
        H : TransferFunction object / integer / float, optional
            DESCRIPTION. Feedback block. The default is 1 (unity feedback)
        feedback_type : Negative or Positive feedback, optional
            DESCRIPTION. The default is "negative".
        Returns
        -------
        None.
        '''
        if type(H) == TransferFunction:
            # Flatten the column coefficient vectors to 1-D for numpy's
            # polynomial helpers.
            G_num = G.num_coef.reshape(len(G.num_coef))
            G_den = G.den_coef.reshape(len(G.den_coef))
            H_num = H.num_coef.reshape(len(H.num_coef))
            H_den = H.den_coef.reshape(len(H.den_coef))
            # Closed loop: G/(1 +- G*H) = (Gn*Hd) / (Gd*Hd +- Gn*Hn)
            feedback_num = np.polymul(G_num, H_den)
            # BUG FIX: the second denominator term was polymul(G_den, H_num);
            # the closed-loop denominator is Gd*Hd +- Gn*Hn.
            if feedback_type == "negative":
                feedback_den = np.polyadd(np.polymul(G_den, H_den), np.polymul(G_num, H_num))
            elif feedback_type == "positive":
                feedback_den = np.polysub(np.polymul(G_den, H_den), np.polymul(G_num, H_num))
        elif type(H) == float or type(H) == int:
            # Scalar feedback gain. NOTE(review): this branch indexes den[2]
            # and only shifts the constant term, so it assumes a second order
            # G with a constant numerator — confirm against callers.
            num = G.num_coef
            den = G.den_coef
            # BUG FIX: the constant-term shift for G/(1 +- G*H) with scalar H
            # is num*H, not num/H (the two coincide only for unity feedback).
            if feedback_type == "negative":
                feedback_den0 = float(den[0])
                feedback_den1 = float(den[1])
                feedback_den2 = float(den[2] + (num[-1]*H))
            elif feedback_type == "positive":
                feedback_den0 = float(den[0])
                feedback_den1 = float(den[1])
                feedback_den2 = float(den[2] - (num[-1]*H))
            feedback_num = num
            feedback_den = np.array([feedback_den0, feedback_den1, feedback_den2])
        # Store coefficients as column vectors, matching TransferFunction's
        # internal convention.
        feedback_num = feedback_num.reshape([len(feedback_num), 1])
        feedback_den = feedback_den.reshape([len(feedback_den), 1])
        self.num_coef = feedback_num
        self.den_coef = feedback_den
        self.feedback_tf = TransferFunction(self.num_coef, self.den_coef)
        self.order = self.feedback_tf.order
class PID():
'''
PID control on a TF
'''
def __init__(self, K_p, K_i, K_d, tf):
    '''
    Build the series combination C(s)*G(s) of a PID controller and a plant.

    Parameters
    ----------
    K_p : float
        DESCRIPTION. Proportional Gain
    K_i : float
        DESCRIPTION. Integral Gain
    K_d : float
        DESCRIPTION. Derivative Gain
    tf : TransferFunction object
        DESCRIPTION. TF on which PID is to be implemented
    Returns
    -------
    None.
    '''
    self.K_p = K_p
    self.K_i = K_i
    self.K_d = K_d
    self.tf = tf
    # PID controller C(s) = (K_d*s^2 + K_p*s + K_i) / s
    pid_num = [self.K_d, self.K_p, self.K_i]
    pid_den = [1, 0]
    num = tf.num_coef
    den = tf.den_coef
    # Flatten the plant's column coefficient vectors to plain lists
    tf_num = list(tf.num_coef.reshape(len(num),))
    tf_den = list(tf.den_coef.reshape(len(den),))
    num_diff = len(pid_num) - len(tf_num)
    den_diff = len(pid_den) - len(tf_den)
    # Zero-pad the shorter coefficient list with leading zeros so both
    # operands have equal length.
    # NOTE(review): np.polymul does not require equal-length inputs, but the
    # leading zeros kept here lengthen the product arrays and therefore the
    # order later computed by TransferFunction — presumably intentional;
    # verify before simplifying.
    try:
        if len(tf_num) < len(pid_num):
            temp_num = np.zeros(num_diff)
            tf_num = np.concatenate((temp_num, tf_num))
        elif len(tf_num) > len(pid_num):
            temp_num = np.zeros(abs(num_diff))
            pid_num = np.concatenate((temp_num, pid_num))
        if len(tf_den) < len(pid_den):
            temp_den = np.zeros(den_diff)
            tf_den = np.concatenate((temp_den, tf_den))
        elif len(tf_den) > len(pid_den):
            temp_den = np.zeros(abs(den_diff))
            pid_den = np.concatenate((temp_den, pid_den))
    except ValueError:
        # Defensive: swallow a np.zeros()/concatenate ValueError and skip
        # padding (np.polymul copes with unequal lengths anyway).
        # NOTE(review): the guarded branches appear to keep sizes
        # non-negative, so this handler may be unreachable — confirm.
        pass
    # Series (open-loop) combination: multiply numerators and denominators
    reduced_tf_num = np.polymul(np.array(tf_num), np.array(pid_num))
    reduced_tf_den = np.polymul(np.array(tf_den), np.array(pid_den))
    self.reduced_tf = TransferFunction(reduced_tf_num, reduced_tf_den)
def display(self):
    '''
    Print the PID transfer-function block as a two-line fraction:
    (K_d*S^2 + K_p*S + K_i) over S, with a dashed divider between them.
    '''
    numerator = f"{self.K_d}*S^2 + {self.K_p}*S + {self.K_i}"
    # Centre the lone "S" under the numerator by padding both sides
    side_pad = " " * round(len(numerator) / 2)
    denominator = side_pad + "S" + side_pad
    divider = "-" * max(len(numerator), len(denominator))
    print(f"{numerator} \n{divider} \n{denominator}")
def response(self, input_type, time_period=10, sample_time=0.05, ret=False, show=True):
    '''
    Delegate the time-response computation to the PID-compensated system.

    Parameters
    ----------
    input_type : string
        Input signal type: impulse, step or ramp.
    time_period : integer, optional
        The time duration the signal is processed for. The default is 10.
    sample_time : float, optional
        Sample time of the signal. The default is 0.05.
    ret : bool, optional
        Set to True if the system's response is to be returned. The default is False.
    show : bool, optional
        Set to True if the system's response is to be displayed. The default is True.

    Returns
    -------
    resp : numpy array
        Response of the compensated system; only returned if ret is True.
        A ValueError from an improper transfer function is reported on
        stdout instead of propagating.
    '''
    try:
        resp = self.reduced_tf.response(input_type, time_period, sample_time, ret, show)
    except ValueError:
        print("Improper transfer function. `num` is longer than `den`.")
        return None
    if ret == True:
        return resp
def tune(self, input_type="step", set_point=1, num_itr=70, rate=0.00000000001, lambd=0.7):
'''
Parameters
----------
input_type : input signal type, optional
DESCRIPTION. The default is "step" input.
set_point : Optimal steady state value, optional
DESCRIPTION. The default is 1.
num_itr : number of iterations, optional
DESCRIPTION. The default is 70. Might have to adjust this to prevent the cost from increasing after decreasing.
rate : learning rate, optional
DESCRIPTION. The default is 0.00000000001.
lambd : regularization coefficient, optional
DESCRIPTION. The default is 0.7
Returns
-------
k : numpy array
DESCRIPTION. numpy array of Kp, Ki, Kd values
'''
np.random.seed(1)
k = np.random.random(3).reshape(3,1)
def red_tf():
pid_num = [k[2][0], k[0][0], k[1][0]]
pid_den = [1, 0]
num = self.tf.num_coef
den = self.tf.den_coef
tf_num = list(self.tf.num_coef.reshape(len(num),))
tf_den = list(self.tf.den_coef.reshape(len(den),))
num_diff = len(pid_num) - len(tf_num)
den_diff = len(pid_den) - len(tf_den)
try:
if len(tf_num) < len(pid_num):
temp_num = np.zeros(num_diff)
tf_num = np.concatenate((temp_num, tf_num))
elif len(tf_num) > len(pid_num):
temp_num = np.zeros(abs(num_diff))
pid_num = np.concatenate((temp_num, pid_num))
if len(tf_den) < len(pid_den):
temp_den = | np.zeros(den_diff) | numpy.zeros |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.