prompt
stringlengths 19
879k
| completion
stringlengths 3
53.8k
| api
stringlengths 8
59
|
---|---|---|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.graph_net."""
from absl import logging
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
from wikigraphs.model import graph_net as gn
class GraphNetTest(absltest.TestCase):
def test_node_classification(self):
# If node has more than 2 neighbors --> class 1, otherwise class 0.
# Graph structure:
# 1 4
# | \ / |
# | 0 - 3 |
# | / \ |
# 2 5
edges = np.array([
[0, 1],
[1, 2],
[2, 0],
[0, 3],
[3, 4],
[4, 5],
[5, 3],
], dtype=np.int32)
n_node = edges.max() + 1
n_edge = edges.shape[0]
g = jraph.GraphsTuple(
senders=edges[:, 0],
receivers=edges[:, 1],
edges=np.ones((edges.shape[0], 1), dtype=np.float32),
nodes=np.ones((n_node, 1), dtype=np.float32),
n_node=np.array([n_node], dtype=np.int32),
n_edge=np.array([n_edge], dtype=np.int32),
globals=None)
g = gn.add_reverse_edges(g)
targets = np.array([1, 0, 0, 1, 0, 0], dtype=np.int32)
n_classes = 2
def forward(graph, targets):
model = gn.SimpleGraphNet(num_layers=5, layer_norm=False)
graph = model(graph)
nodes = graph.nodes
logits = hk.Linear(n_classes)(nodes)
pred = logits.argmax(axis=-1)
accuracy = (pred == targets).mean()
targets = jax.nn.one_hot(targets, n_classes, dtype=jnp.float32)
return -jnp.mean(jnp.sum(
jax.nn.log_softmax(logits, axis=-1) * targets, axis=-1)), accuracy
init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))
rng = hk.PRNGSequence(0)
params = init_fn(next(rng), g, targets)
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
apply_fn = jax.jit(apply_fn)
for i in range(500):
(loss, acc), grad = jax.value_and_grad(apply_fn,
has_aux=True)(params, g, targets)
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info('Step %d, loss %.8f, accuracy %.4f', i + 1, loss, acc)
self.assertLess(loss, 0.01)
self.assertEqual(acc, 1.0)
def test_pad_size(self):
self.assertEqual(gn.pad_size(1), 1)
self.assertEqual(gn.pad_size(5), 8)
self.assertEqual(gn.pad_size(7), 8)
self.assertEqual(gn.pad_size(101), 128)
def test_pad_graphs(self):
# No new edges to add
graphs = jraph.GraphsTuple(
nodes=np.arange(6)[:, None],
edges=np.arange(4)[:, None],
senders=np.array([0, 2, 3, 4]),
receivers=np.array([1, 3, 4, 5]),
n_node=np.array([2, 4]),
n_edge=np.array([1, 3]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 0, 0])[:, None])
np.testing.assert_array_equal(padded.edges, graphs.edges)
np.testing.assert_array_equal(padded.senders, graphs.senders)
np.testing.assert_array_equal(padded.receivers, graphs.receivers)
np.testing.assert_array_equal(padded.n_node, [2, 4, 2])
np.testing.assert_array_equal(padded.n_edge, [1, 3, 0])
# Add just a single default node
graphs = jraph.GraphsTuple(
nodes=np.arange(7)[:, None],
edges=np.arange(5)[:, None],
senders=np.array([0, 2, 3, 5, 6]),
receivers=np.array([1, 3, 4, 6, 5]),
n_node=np.array([2, 3, 2]),
n_edge=np.array([1, 2, 2]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 6, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 3, 4, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
[0, 2, 3, 5, 6, 7, 7, 7])
np.testing.assert_array_equal(
padded.receivers,
[1, 3, 4, 6, 5, 7, 7, 7])
np.testing.assert_array_equal(
padded.n_node, [2, 3, 2, 1])
np.testing.assert_array_equal(
padded.n_edge, [1, 2, 2, 3])
# Num. nodes is a power of 2 but we still pad at least one extra node
graphs = jraph.GraphsTuple(
nodes=np.arange(8)[:, None],
edges=np.arange(5)[:, None],
senders=np.array([0, 2, 3, 5, 6]),
receivers=np.array([1, 3, 4, 6, 7]),
n_node=np.array([2, 3, 3]),
n_edge=np.array([1, 2, 2]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 3, 4, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
[0, 2, 3, 5, 6, 8, 8, 8])
np.testing.assert_array_equal(
padded.receivers,
[1, 3, 4, 6, 7, 8, 8, 8])
np.testing.assert_array_equal(
padded.n_node, [2, 3, 3, 8])
np.testing.assert_array_equal(
padded.n_edge, [1, 2, 2, 3])
def test_batch_graphs_by_device(self):
# batch 4 graphs for 2 devices
num_devices = 2
graphs = [
jraph.GraphsTuple(
nodes=np.arange(2)[:, None],
edges=np.arange(2)[:, None],
senders=np.array([0, 1]),
receivers=np.array([1, 0]),
n_node=np.array([2]),
n_edge=np.array([2]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(3)[:, None],
edges=np.arange(1)[:, None],
senders=np.array([2]),
receivers=np.array([0]),
n_node=np.array([3]),
n_edge=np.array([1]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(4)[:, None],
edges=np.arange(2)[:, None],
senders=np.array([1, 0]),
receivers=np.array([2, 3]),
n_node=np.array([4]),
n_edge=np.array([2]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(5)[:, None],
edges=np.arange(3)[:, None],
senders=np.array([2, 1, 3]),
receivers=np.array([1, 4, 0]),
n_node=np.array([5]),
n_edge=np.array([3]),
globals=None),
]
batched = gn.batch_graphs_by_device(graphs, num_devices)
self.assertLen(batched, num_devices)
np.testing.assert_array_equal(
batched[0].nodes,
np.array([0, 1, 0, 1, 2])[:, None])
np.testing.assert_array_equal(
batched[0].edges,
np.array([0, 1, 0])[:, None])
np.testing.assert_array_equal(
batched[0].senders,
np.array([0, 1, 4]))
np.testing.assert_array_equal(
batched[0].receivers,
np.array([1, 0, 2]))
np.testing.assert_array_equal(
batched[0].n_node,
np.array([2, 3]))
np.testing.assert_array_equal(
batched[0].n_edge,
np.array([2, 1]))
np.testing.assert_array_equal(
batched[1].nodes,
np.array([0, 1, 2, 3, 0, 1, 2, 3, 4])[:, None])
np.testing.assert_array_equal(
batched[1].edges,
np.array([0, 1, 0, 1, 2])[:, None])
np.testing.assert_array_equal(
batched[1].senders,
| np.array([1, 0, 6, 5, 7]) | numpy.array |
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import agents.utils as agent_utils
class FullOracleModel:
    """Encoder trained against the environment's true (oracle) MDP matrices.

    Learns a convolutional encoder that maps raw states to a softmax
    distribution over ``num_blocks`` abstract blocks.  The reward and
    transition matrices are taken from the environment (``env.r``,
    ``env.p``) as TF constants rather than learned.
    """

    def __init__(self, env, input_shape, num_blocks, encoder_filters, encoder_filter_sizes, encoder_strides,
                 encoder_neurons, learning_rate, weight_decay, gamma, batch_norm=False):
        # NOTE(review): env must expose `r` and `p`; presumably shaped
        # (num_actions, num_blocks) and (num_actions, num_blocks, num_blocks)
        # given how they are gathered/matmul'ed below -- TODO confirm.
        self.env = env
        self.input_shape = input_shape
        self.num_blocks = num_blocks
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        # gamma weights the transition loss relative to the reward loss.
        self.gamma = gamma
        self.batch_norm = batch_norm

    def encode(self, states, batch_size=100):
        """Runs the encoder over `states` in mini-batches.

        Returns the concatenated block distributions, one row per state.
        Requires `start_session` and `build` to have been called.
        """
        assert states.shape[0]
        num_steps = int(np.ceil(states.shape[0] / batch_size))
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            # NOTE(review): is_training_pl is not fed here; with
            # batch_norm=True this would fail -- presumably only used with
            # the default batch_norm=False. TODO confirm.
            embedding = self.session.run(self.state_block_t, feed_dict={
                self.states_pl: states[batch_slice]
            })
            embeddings.append(embedding)
        embeddings = np.concatenate(embeddings, axis=0)
        return embeddings

    def build(self):
        """Builds the full TF graph: placeholders, model and training ops."""
        self.build_placeholders_and_constants()
        self.build_model()
        self.build_training()

    def build_placeholders_and_constants(self):
        """Creates input placeholders and the oracle reward/transition constants."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        # Oracle matrices come straight from the environment.
        self.r_c = tf.constant(self.env.r, dtype=tf.float32)
        self.p_c = tf.constant(self.env.p, dtype=tf.float32)

    def build_model(self):
        """Encodes current and next states with a weight-shared encoder."""
        self.state_block_t = self.build_encoder(self.states_pl)
        self.next_state_block_t = self.build_encoder(self.next_states_pl, share_weights=True)

    def build_training(self):
        """Builds reward/transition losses and the Adam train step."""
        # Select each transition's per-action oracle rows.
        r_t = tf.gather(self.r_c, self.actions_pl)
        p_t = tf.gather(self.p_c, self.actions_pl)
        dones_t = tf.cast(self.dones_pl, tf.float32)
        # Squared error between observed reward and predicted block reward.
        self.reward_loss_t = tf.square(self.rewards_pl - tf.reduce_sum(self.state_block_t * r_t, axis=1))
        # Squared error between the (stop-gradient) next-state encoding and
        # the current encoding pushed through the transition matrix; masked
        # out on terminal transitions.
        self.transition_loss_t = tf.reduce_sum(
            tf.square(tf.stop_gradient(self.next_state_block_t) - tf.matmul(tf.expand_dims(self.state_block_t, axis=1), p_t)[:, 0, :]),
            axis=1
        ) * (1 - dones_t)
        reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if len(reg) > 0:
            self.regularization_loss_t = tf.add_n(reg)
        else:
            self.regularization_loss_t = 0
        self.loss_t = tf.reduce_mean(
            (1 / 2) * (self.reward_loss_t + self.gamma * self.transition_loss_t), axis=0
        ) + self.regularization_loss_t
        self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_t)
        if self.batch_norm:
            # Fold batch-norm moving-average updates into the train step.
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.train_step = tf.group(self.train_step, self.update_op)

    def build_encoder(self, input_t, share_weights=False):
        """Conv + dense encoder ending in a softmax over blocks.

        Args:
            input_t: state placeholder; a channel axis is appended before
                the conv stack.
            share_weights: reuse the "encoder" variable scope.

        Returns:
            Tensor of block probabilities, shape (batch, num_blocks).
        """
        x = tf.expand_dims(input_t, axis=-1)
        with tf.variable_scope("encoder", reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    # With batch norm, activation/bias move into the BN block.
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                x = tf.layers.dense(
                    x, self.num_blocks, activation=tf.nn.softmax,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        return x

    def start_session(self, gpu_memory=None):
        """Opens a TF session and initializes all variables."""
        gpu_options = None
        if gpu_memory is not None:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        self.session = tf.Session(config=tf_config)
        self.session.run(tf.global_variables_initializer())

    def stop_session(self):
        """Closes the TF session if one is open."""
        if self.session is not None:
            self.session.close()
class PartialOracleModel:
    """Encoder plus *learned* reward and transition matrices.

    Unlike FullOracleModel, the per-action reward matrix ``r_t`` and
    transition matrix ``p_t`` are trainable variables optimized jointly
    with the encoder (via separate optimizers).
    """

    ENCODER_NAMESPACE = "encoder"
    TARGET_ENCODER_NAMESPACE = "target_encoder"

    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_model, weight_decay, gamma,
                 optimizer_encoder, optimizer_model, max_steps, batch_norm=True, target_network=False,
                 add_hand_state=False, add_entropy=False, entropy_from=10000, entropy_start=0.0, entropy_end=0.1,
                 ce_transitions=False):
        self.input_shape = input_shape
        self.num_blocks = num_blocks
        self.num_actions = num_actions
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.learning_rate_encoder = learning_rate_encoder
        self.learning_rate_model = learning_rate_model
        self.weight_decay = weight_decay
        # gamma weights the transition loss relative to the reward loss.
        self.gamma = gamma
        self.optimizer_encoder = optimizer_encoder
        self.optimizer_model = optimizer_model
        # max_steps is used to scale the entropy-bonus schedule below.
        self.max_steps = max_steps
        self.batch_norm = batch_norm
        # If True, next states are encoded by a separate target network that
        # is synced from the online encoder via `target_update_op`.
        self.target_network = target_network
        self.add_hand_state = add_hand_state
        # Optional entropy bonus, linearly annealed from entropy_start to
        # entropy_end starting at step entropy_from.
        self.add_entropy = add_entropy
        self.entropy_from = entropy_from
        self.entropy_start = entropy_start
        self.entropy_end = entropy_end
        # If True, use cross-entropy for the transition loss instead of MSE.
        self.ce_transitions = ce_transitions
        self.hand_states_pl, self.next_hand_states_pl = None, None

    def encode(self, states, batch_size=100, hand_states=None):
        """Runs the encoder over `states` (optionally with hand states).

        Returns the concatenated block distributions, one row per state.
        """
        assert states.shape[0]
        num_steps = int(np.ceil(states.shape[0] / batch_size))
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: states[batch_slice],
                self.is_training_pl: False
            }
            if hand_states is not None:
                feed_dict[self.hand_states_pl] = hand_states[batch_slice]
            embedding = self.session.run(self.state_block_t, feed_dict=feed_dict)
            embeddings.append(embedding)
        embeddings = np.concatenate(embeddings, axis=0)
        return embeddings

    def build(self):
        """Builds the full TF graph: placeholders, model and training ops."""
        self.build_placeholders()
        self.build_model()
        self.build_training()

    def build_placeholders(self):
        """Creates all input placeholders (hand-state ones only if enabled)."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        if self.add_hand_state:
            self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
            self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")

    def build_model(self):
        """Builds encoders plus the learnable reward/transition matrices."""
        self.state_block_t = self.build_encoder(self.states_pl, hand_state=self.hand_states_pl)
        if self.target_network:
            # Separate (non-shared) target encoder for next states.
            self.next_state_block_t = self.build_encoder(
                self.next_states_pl, share_weights=False, namespace=self.TARGET_ENCODER_NAMESPACE,
                hand_state=self.next_hand_states_pl
            )
            self.build_target_update()
        else:
            self.next_state_block_t = self.build_encoder(
                self.next_states_pl, share_weights=True, hand_state=self.next_hand_states_pl
            )
        self.r_t = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        )
        self.p_t = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        )

    def build_training(self):
        """Builds losses and separate encoder/model train steps."""
        self.global_step = tf.train.get_or_create_global_step()
        # Per-transition rows of the learned matrices.
        r_t = tf.gather(self.r_t, self.actions_pl)
        p_t = tf.gather(self.p_t, self.actions_pl)
        dones_t = tf.cast(self.dones_pl, tf.float32)
        self.reward_loss_t = (1 / 2) * tf.square(self.rewards_pl - tf.reduce_sum(self.state_block_t * r_t, axis=1))
        if self.ce_transitions:
            # treat p_t as log probabilities
            p_t = tf.nn.softmax(p_t, axis=-1)
            # predict next state
            next_state = tf.matmul(tf.expand_dims(self.state_block_t, axis=1), p_t)[:, 0, :]
            # cross entropy between next state probs and predicted probs
            self.transition_loss_t = - self.next_state_block_t * tf.log(next_state + 1e-7)
            self.transition_loss_t = tf.reduce_sum(self.transition_loss_t, axis=-1)
            self.transition_loss_t = self.transition_loss_t * (1 - dones_t)
        else:
            # MSE against a stop-gradient next-state encoding, masked on
            # terminal transitions.
            self.transition_loss_t = (1 / 2) * tf.reduce_sum(
                tf.square(tf.stop_gradient(self.next_state_block_t) -
                          tf.matmul(tf.expand_dims(self.state_block_t, axis=1), p_t)[:, 0, :]),
                axis=1
            ) * (1 - dones_t)
        reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if len(reg) > 0:
            self.regularization_loss_t = tf.add_n(reg)
        else:
            self.regularization_loss_t = 0
        self.loss_t = tf.reduce_mean(
            self.reward_loss_t + self.gamma * self.transition_loss_t, axis=0
        ) + self.regularization_loss_t
        if self.add_entropy:
            # Entropy of the block distribution, annealed in linearly from
            # step entropy_from to max_steps.
            plogs = self.state_block_t * tf.log(self.state_block_t + 1e-7)
            self.entropy_loss_t = tf.reduce_mean(tf.reduce_sum(- plogs, axis=1), axis=0)
            # NOTE(review): f is not clamped above 1, so past max_steps the
            # entropy weight keeps growing -- presumably training stops at
            # max_steps. TODO confirm.
            f = tf.maximum(0.0, tf.cast(self.global_step - self.entropy_from, tf.float32)) / \
                (self.max_steps - self.entropy_from)
            f = f * (self.entropy_end - self.entropy_start) + self.entropy_start
            self.loss_t += f * self.entropy_loss_t
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        model_variables = [self.r_t, self.p_t]
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
        model_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_model)
        # Only the encoder step advances the global step.
        self.encoder_train_step = encoder_optimizer.minimize(
            self.loss_t, global_step=self.global_step, var_list=encoder_variables
        )
        if self.batch_norm:
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
        self.model_train_step = model_optimizer.minimize(
            self.loss_t, var_list=model_variables
        )
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)

    def build_encoder(self, input_t, share_weights=False, namespace=ENCODER_NAMESPACE, hand_state=None):
        """Conv + dense encoder ending in a softmax over blocks.

        Args:
            input_t: state placeholder; a channel axis is appended.
            share_weights: reuse variables in `namespace`.
            namespace: variable scope to build under.
            hand_state: optional (batch, 1) tensor concatenated after the
                conv stack.

        Returns:
            Tensor of block probabilities, shape (batch, num_blocks).
        """
        x = tf.expand_dims(input_t, axis=-1)
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            if hand_state is not None:
                x = tf.concat([x, hand_state], axis=1)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                x = tf.layers.dense(
                    x, self.num_blocks, activation=tf.nn.softmax,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        return x

    def build_target_update(self):
        """Creates an op that copies online encoder vars to the target encoder."""
        source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
        target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
        assert len(source_vars) == len(target_vars) and len(source_vars) > 0
        update_ops = []
        for source_var, target_var in zip(source_vars, target_vars):
            update_ops.append(tf.assign(target_var, source_var))
        self.target_update_op = tf.group(*update_ops)

    def start_session(self, gpu_memory=None):
        """Opens a TF session and initializes all variables."""
        gpu_options = None
        if gpu_memory is not None:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        self.session = tf.Session(config=tf_config)
        self.session.run(tf.global_variables_initializer())

    def stop_session(self):
        """Closes the TF session if one is open."""
        if self.session is not None:
            self.session.close()
class GumbelModel:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_p,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
target_network=True, gamma_schedule=None, straight_through=False, kl=False, kl_weight=1.0,
oracle_r=None, oracle_p=None, transitions_mse=False, correct_ce=False):
self.input_shape = input_shape
self.num_blocks = num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.learning_rate_encoder = learning_rate_encoder
self.learning_rate_r = learning_rate_r
self.learning_rate_p = learning_rate_p
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.batch_norm = batch_norm
self.target_network = target_network
self.gamma_schedule = gamma_schedule
self.straight_through = straight_through
self.kl = kl
self.kl_weight = kl_weight
self.oracle_r = oracle_r
self.oracle_p = oracle_p
self.transitions_mse = transitions_mse
self.correct_ce = correct_ce
if self.gamma_schedule is not None:
assert len(self.gamma) == len(self.gamma_schedule) + 1
self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, states, batch_size=100, hand_states=None):
assert states.shape[0]
num_steps = int(np.ceil(states.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: states[batch_slice],
self.is_training_pl: False
}
if hand_states is not None:
feed_dict[self.hand_states_pl] = hand_states[batch_slice]
embedding = self.session.run(self.state_block_samples_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def build(self):
self.build_placeholders()
self.build_model()
self.build_training()
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
self.temperature_pl = tf.placeholder(tf.float32, shape=[], name="temperature_pl")
def build_model(self):
# encode state block
self.state_block_logits_t = self.build_encoder(self.states_pl, hand_state=self.hand_states_pl)
agent_utils.summarize(self.state_block_logits_t, "state_block_logits_t")
self.state_block_cat_dist = tf.contrib.distributions.OneHotCategorical(
logits=self.state_block_logits_t
)
self.state_block_samples_t = tf.cast(self.state_block_cat_dist.sample(), tf.int32)
self.state_block_sg_dist = tf.contrib.distributions.RelaxedOneHotCategorical(
self.temperature_pl, logits=self.state_block_logits_t
)
self.state_block_sg_samples_t = self.state_block_sg_dist.sample()
agent_utils.summarize(self.state_block_sg_samples_t, "state_block_sg_samples_t")
if self.straight_through:
# hard sample
self.state_block_sg_samples_hard_t = \
tf.cast(tf.one_hot(tf.argmax(self.state_block_sg_samples_t, -1), self.num_blocks), tf.float32)
# fake gradients for the hard sample
self.state_block_sg_samples_t = \
tf.stop_gradient(self.state_block_sg_samples_hard_t - self.state_block_sg_samples_t) + \
self.state_block_sg_samples_t
agent_utils.summarize(self.state_block_sg_samples_hard_t, "state_block_sg_samples_hard_t")
# encode next state block
if self.target_network:
self.next_state_block_logits_t = self.build_encoder(
self.next_states_pl, share_weights=False, namespace=self.TARGET_ENCODER_NAMESPACE,
hand_state=self.next_hand_states_pl
)
self.build_target_update()
else:
self.next_state_block_logits_t = self.build_encoder(
self.next_states_pl, share_weights=True, hand_state=self.next_hand_states_pl
)
self.next_state_block_cat_dist = tf.contrib.distributions.OneHotCategorical(
logits=self.next_state_block_logits_t
)
self.next_state_block_samples_t = tf.cast(self.next_state_block_cat_dist.sample(), tf.float32)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
self.r_t = self.r_v
self.p_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
if not self.transitions_mse:
self.p_t = tf.nn.softmax(self.p_v, axis=-1)
else:
self.p_t = self.p_v
def build_training(self):
# set up global step variable
self.global_step = tf.train.get_or_create_global_step()
# gather reward and transition matrices for each action
if self.oracle_r is not None:
r_t = tf.gather(self.oracle_r, self.actions_pl)
else:
r_t = tf.gather(self.r_t, self.actions_pl)
if self.oracle_p is not None:
p_t = tf.gather(self.oracle_p, self.actions_pl)
else:
p_t = tf.gather(self.p_t, self.actions_pl)
dones_t = tf.cast(self.dones_pl, tf.float32)
# reward loss
self.reward_loss_t = tf.square(self.rewards_pl - tf.reduce_sum(self.state_block_sg_samples_t * r_t, axis=1))
# transition loss
next_state = tf.matmul(tf.expand_dims(self.state_block_sg_samples_t, axis=1), p_t)[:, 0, :]
if self.transitions_mse:
self.transition_loss_t = tf.reduce_sum(
tf.square(tf.stop_gradient(self.next_state_block_samples_t) - next_state),
axis=1
) * (1 - dones_t)
else:
if self.correct_ce:
self.transition_loss_t = tf.reduce_sum(
- tf.stop_gradient(tf.nn.softmax(self.next_state_block_logits_t)) * tf.log(next_state + 1e-7),
axis=1
) * (1 - dones_t)
else:
self.transition_loss_t = tf.reduce_sum(
- tf.stop_gradient(self.next_state_block_samples_t) * tf.log(next_state + 1e-7),
axis=1
) * (1 - dones_t)
# weight decay regularizer
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0.0
# kl divergence regularizer
if self.kl:
prior_logits_t = tf.ones_like(self.state_block_logits_t) / self.num_blocks
prior_cat_dist = tf.contrib.distributions.OneHotCategorical(logits=prior_logits_t)
kl_divergence_t = tf.contrib.distributions.kl_divergence(self.state_block_cat_dist, prior_cat_dist)
self.kl_loss_t = tf.reduce_mean(kl_divergence_t)
else:
self.kl_loss_t = 0.0
# final loss
if self.gamma_schedule is not None:
gamma = tf.train.piecewise_constant(self.global_step, self.gamma_schedule, self.gamma)
else:
gamma = self.gamma
self.loss_t = tf.reduce_mean(
self.reward_loss_t + gamma * self.transition_loss_t, axis=0
) + self.regularization_loss_t + self.kl_weight * self.kl_loss_t
# encoder optimizer
encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
self.encoder_train_step = encoder_optimizer.minimize(
self.loss_t, global_step=self.global_step, var_list=encoder_variables
)
# add batch norm updates
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
# model optimizer if not full oracle
if self.oracle_r is None or self.oracle_p is None:
r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
r_step = r_optimizer.minimize(
self.reward_loss_t, var_list=[self.r_v]
)
p_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_p)
p_step = p_optimizer.minimize(
self.transition_loss_t, var_list=[self.p_v]
)
self.model_train_step = tf.group(r_step, p_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
else:
self.train_step = self.encoder_train_step
def build_encoder(self, input_t, share_weights=False, namespace=ENCODER_NAMESPACE, hand_state=None):
x = tf.expand_dims(input_t, axis=-1)
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
if hand_state is not None:
x = tf.concat([x, hand_state], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
x = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
return x
def build_target_update(self):
source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
assert len(source_vars) == len(target_vars) and len(source_vars) > 0
update_ops = []
for source_var, target_var in zip(source_vars, target_vars):
update_ops.append(tf.assign(target_var, source_var))
self.target_update_op = tf.group(*update_ops)
    def build_summaries(self):
        """Register TensorBoard summaries for losses and gradient norms.

        Adds scalar summaries for the total, transition and reward losses,
        plus summaries of the L2 norms of the gradients of both partial
        losses w.r.t. the state logits and the straight-through samples.
        """
        # losses
        tf.summary.scalar("loss", self.loss_t)
        tf.summary.scalar("transition_loss", tf.reduce_mean(self.transition_loss_t))
        tf.summary.scalar("reward_loss", tf.reduce_mean(self.reward_loss_t))
        # logits grads; tf.gradients returns a list, [0] selects the single tensor
        grad_t = tf.gradients(tf.reduce_mean(self.transition_loss_t), self.state_block_logits_t)
        grad_r = tf.gradients(tf.reduce_mean(self.reward_loss_t), self.state_block_logits_t)
        norm_grad_t = tf.norm(grad_t, ord=2, axis=-1)[0]
        norm_grad_r = tf.norm(grad_r, ord=2, axis=-1)[0]
        agent_utils.summarize(norm_grad_t, "logits_grad_t")
        agent_utils.summarize(norm_grad_r, "logits_grad_r")
        # samples grads (straight-through gradient path through the samples)
        grad_t = tf.gradients(tf.reduce_mean(self.transition_loss_t), self.state_block_sg_samples_t)
        grad_r = tf.gradients(tf.reduce_mean(self.reward_loss_t), self.state_block_sg_samples_t)
        norm_grad_t = tf.norm(grad_t, ord=2, axis=-1)[0]
        norm_grad_r = tf.norm(grad_r, ord=2, axis=-1)[0]
        agent_utils.summarize(norm_grad_t, "sg_samples_grad_t")
        agent_utils.summarize(norm_grad_r, "sg_samples_grad_r")
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
def stop_session(self):
if self.session is not None:
self.session.close()
class ExpectationModel:
    """Discrete-latent expectation model (TF1 graph mode).

    An encoder maps a depth image plus a scalar hand state to a softmax
    distribution over `num_blocks` abstract states.  Per-action reward and
    transition matrices are learned jointly with the encoder so that
    rewards and next-state distributions are predicted in expectation over
    the latent state.  Either matrix can be replaced by a fixed oracle.
    """
    ENCODER_NAMESPACE = "encoder"
    TARGET_ENCODER_NAMESPACE = "target_encoder"
    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
                 target_network=True, oracle_r=None, oracle_t=None, propagate_next_state=False,
                 z_transform=False, abs_z_transform=False, sigsoftmax=False, encoder_tau=1.0, model_tau=1.0,
                 no_tau_target_encoder=False, kl_penalty=False, kl_penalty_weight=0.01, small_r_init=False,
                 small_t_init=False):
        """Store hyper-parameters; no graph is built until `build()` is called.

        :param input_shape:         shape of a single depth observation (without batch dim).
        :param num_blocks:          number of discrete latent states.
        :param num_actions:         number of discrete actions.
        :param gamma:               weight of the transition loss in the total loss.
        :param oracle_r/oracle_t:   optional fixed reward / transition tensors; when
                                    given, the corresponding model is not learned.
        :param propagate_next_state: backprop through the next-state encoding
                                    (incompatible with a target network).
        :param encoder_tau/model_tau: softmax temperatures for encoder / transitions.
        """
        # gradients can only flow into the next-state encoding when the same
        # (non-target) encoder embeds both states
        if propagate_next_state:
            assert not target_network
        self.input_shape = input_shape
        self.num_blocks = num_blocks
        self.num_actions = num_actions
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.learning_rate_encoder = learning_rate_encoder
        self.learning_rate_r = learning_rate_r
        self.learning_rate_t = learning_rate_t
        self.weight_decay = weight_decay
        self.gamma = gamma
        self.optimizer_encoder = optimizer_encoder
        self.optimizer_model = optimizer_model
        self.max_steps = max_steps
        self.batch_norm = batch_norm
        self.target_network = target_network
        self.oracle_r = oracle_r
        self.oracle_t = oracle_t
        self.propagate_next_state = propagate_next_state
        self.z_transform = z_transform
        self.abs_z_transform = abs_z_transform
        self.sigsoftmax = sigsoftmax
        self.encoder_tau = encoder_tau
        self.model_tau = model_tau
        self.no_tau_target_encoder = no_tau_target_encoder
        self.kl_penalty = kl_penalty
        self.kl_penalty_weight = kl_penalty_weight
        self.small_r_init = small_r_init
        self.small_t_init = small_t_init
        # placeholders are created later in build_placeholders
        self.hand_states_pl, self.next_hand_states_pl = None, None
    def encode(self, depths, hand_states, batch_size=100):
        """Return latent softmax distributions for observations, in mini-batches."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[batch_slice],
                self.is_training_pl: False
            }
            embedding = self.session.run(self.state_softmax_t, feed_dict=feed_dict)
            embeddings.append(embedding)
        embeddings = np.concatenate(embeddings, axis=0)
        return embeddings
    def validate(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones, batch_size=100):
        """Compute per-sample (transition, reward) losses over a dataset.

        Returns an array of shape (num_samples, 2): column 0 is the
        transition loss, column 1 the reward loss.
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                # hand states arrive as 1-D vectors; placeholders expect (N, 1)
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            l1, l2 = self.session.run([self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict)
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
        losses = np.concatenate(losses, axis=0)
        return losses
    def validate_and_encode(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones,
                            batch_size=100):
        """Like `validate`, but also returns the latent softmax embeddings."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            tmp_embeddings, l1, l2 = self.session.run([
                self.state_softmax_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)
        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)
        return losses, embeddings
    def build(self):
        """Construct the whole TF graph: placeholders, model, training ops."""
        self.build_placeholders()
        self.build_model()
        self.build_training()
    def build_placeholders(self):
        """Create input placeholders for transitions (s, a, r, s', done)."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
    def build_model(self):
        """Build encoders for current/next states and the reward/transition models."""
        self.state_logits_t, self.state_softmax_t = self.build_encoder(
            self.states_pl, self.hand_states_pl, self.encoder_tau
        )
        # perplexity = 2 ** H(p) with base-2 entropy H; measures how many
        # latent states are effectively used (1e-7 guards log(0))
        self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
            - tf.reduce_mean(
                tf.reduce_sum(
                    self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
                    tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
                    axis=1
                ),
                axis=0
            )
        )
        if self.no_tau_target_encoder:
            target_tau = 1.0
        else:
            target_tau = self.encoder_tau
        if self.target_network:
            # separate (periodically synced) weights for the next-state encoder
            self.next_state_logits_t, self.next_state_softmax_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=False,
                namespace=self.TARGET_ENCODER_NAMESPACE
            )
            self.build_target_update()
        else:
            self.next_state_logits_t, self.next_state_softmax_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=True
            )
        self.build_r_model()
        self.build_t_model()
    def build_r_model(self):
        """Create the learnable per-action reward matrix (num_actions, num_blocks)."""
        if self.small_r_init:
            r_init = tf.random_normal_initializer(mean=0, stddev=0.1, dtype=tf.float32)
        else:
            r_init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=r_init
        )
        self.r_t = self.r_v
    def build_t_model(self):
        """Create the learnable transition tensor (num_actions, num_blocks, num_blocks).

        Axis 2 is the next-state dimension; a temperature-scaled softmax
        over it yields proper transition distributions.
        """
        if self.small_t_init:
            t_init = tf.random_normal_initializer(mean=0, stddev=0.1, dtype=tf.float32)
        else:
            t_init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=t_init
        )
        self.t_softmax_t = tf.nn.softmax(self.t_v / self.model_tau, axis=2)
        self.t_logsoftmax_t = tf.nn.log_softmax(self.t_v / self.model_tau, axis=2)
    def build_training(self):
        """Assemble the total loss and the combined train op."""
        # prep
        self.global_step = tf.train.get_or_create_global_step()
        self.gather_matrices()
        self.dones_float_t = tf.cast(self.dones_pl, tf.float32)
        # build losses
        self.build_reward_loss()
        self.build_transition_loss()
        self.build_regularization_loss()
        self.build_kl_penalty()
        # build full loss; gamma is a non-trainable variable (tunable via
        # set_gamma) and stop_gradient keeps it a pure weighting constant
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
        self.loss_t = self.reward_loss_t + tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
            self.regularization_loss_t + self.kl_penalty_weight_v * self.kl_penalty_t
        # build training
        self.build_encoder_training()
        self.build_model_training()
        # integrate training into a single op
        if self.model_train_step is None:
            self.train_step = self.encoder_train_step
        else:
            self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
    def gather_matrices(self):
        """Select the per-sample reward rows / transition slices for the taken actions."""
        if self.oracle_r is not None:
            self.r_gather_t = tf.gather(self.oracle_r, self.actions_pl)
        else:
            self.r_gather_t = tf.gather(self.r_t, self.actions_pl)
        if self.oracle_t is not None:
            # NOTE(review): oracle_t is gathered directly — presumably already
            # log-probabilities; verify against how oracle_t is constructed.
            self.t_logsoftmax_gather_t = tf.gather(self.oracle_t, self.actions_pl)
        else:
            self.t_logsoftmax_gather_t = tf.gather(self.t_logsoftmax_t, self.actions_pl)
    def build_reward_loss(self):
        """Expected squared reward error under the latent distribution (x 1/2)."""
        term1 = tf.square(self.rewards_pl[:, tf.newaxis] - self.r_gather_t)
        term2 = term1 * self.state_softmax_t
        self.full_reward_loss_t = (1 / 2) * tf.reduce_sum(term2, axis=1)
        self.reward_loss_t = (1 / 2) * tf.reduce_mean(tf.reduce_sum(term2, axis=1), axis=0)
    def build_transition_loss(self):
        """Expected negative log-likelihood of latent transitions.

        The outer product of the current and next latent distributions
        weights the per-action log transition probabilities; terminal
        transitions are masked out and the mean is taken over non-terminal
        samples only (with a floor of 1 to avoid division by zero).
        """
        if self.propagate_next_state:
            self.transition_term1 = self.state_softmax_t[:, :, tf.newaxis] * self.next_state_softmax_t[:, tf.newaxis, :]
        else:
            # treat the next-state encoding as a fixed target
            self.transition_term1 = self.state_softmax_t[:, :, tf.newaxis] * tf.stop_gradient(
                self.next_state_softmax_t[:, tf.newaxis, :]
            )
        self.transition_term2 = self.transition_term1 * self.t_logsoftmax_gather_t
        self.full_transition_loss_t = - tf.reduce_sum(self.transition_term2, axis=[1, 2]) * (1 - self.dones_float_t)
        self.transition_loss_t = tf.reduce_sum(self.full_transition_loss_t, axis=0) / tf.reduce_max(
            [1.0, tf.reduce_sum(1 - self.dones_float_t)]
        )
    def build_regularization_loss(self):
        """Sum all registered weight-decay terms (0 when there are none)."""
        reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if len(reg) > 0:
            self.regularization_loss_t = tf.add_n(reg)
        else:
            self.regularization_loss_t = 0
    def build_kl_penalty(self):
        """Optional entropy-style penalty on the latent distribution.

        The weight lives in a non-trainable variable so it can be annealed
        at run time via `set_kl_penalty_weight`.
        """
        self.kl_penalty_weight_v = tf.Variable(self.kl_penalty_weight, trainable=False, dtype=tf.float32)
        self.kl_penalty_weight_pl = tf.placeholder(tf.float32, shape=[], name="kl_penalty_weight_pl")
        self.kl_penalty_weight_assign = tf.assign(self.kl_penalty_weight_v, self.kl_penalty_weight_pl)
        if self.kl_penalty:
            # sum(p * log p) is the negative entropy; minimizing it sharpens
            # the latent distribution
            log_softmax = tf.nn.log_softmax(self.state_logits_t, axis=-1)
            self.kl_penalty_t = tf.reduce_mean(tf.reduce_sum(self.state_softmax_t * log_softmax, axis=-1), axis=0)
        else:
            self.kl_penalty_t = 0.0
    def build_encoder_training(self):
        """Minimize the total loss w.r.t. encoder variables (incl. BN updates)."""
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
        self.encoder_train_step = encoder_optimizer.minimize(
            self.loss_t, global_step=self.global_step, var_list=encoder_variables
        )
        if self.batch_norm:
            # batch-norm moving averages must be updated alongside the train step
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
    def build_model_training(self):
        """Separate optimizers for the reward and transition matrices.

        Skipped for any component replaced by an oracle; model_train_step
        is None when both are oracles.
        """
        model_train_step = []
        if self.oracle_r is None:
            r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
            self.r_step = r_optimizer.minimize(
                self.reward_loss_t, var_list=[self.r_v]
            )
            model_train_step.append(self.r_step)
        if self.oracle_t is None:
            t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
            self.t_step = t_optimizer.minimize(
                self.transition_loss_t, var_list=[self.t_v]
            )
            model_train_step.append(self.t_step)
        if len(model_train_step) > 0:
            self.model_train_step = tf.group(*model_train_step)
        else:
            self.model_train_step = None
    def build_encoder(self, depth_pl, hand_state_pl, tau, share_weights=False, namespace=ENCODER_NAMESPACE):
        """Build the conv + fc encoder; returns (logits, latent distribution).

        Pipeline: conv stack -> flatten -> concat hand state -> fc stack ->
        num_blocks logits -> one of several normalizations (z-transform,
        abs-z-transform, sigsoftmax, or temperature softmax).
        """
        # depth images come in without a channel axis; add one for conv2d
        x = tf.expand_dims(depth_pl, axis=-1)
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    # with batch norm, activation/bias are applied after BN instead
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    # the last conv's BN+ReLU is applied after flattening below
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            x = tf.concat([x, hand_state_pl], axis=1)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                x = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        if self.z_transform:
            # shift to non-negative, then normalize to sum to one
            dist = x - tf.reduce_min(x, axis=1)[:, tf.newaxis]
            dist = dist / tf.reduce_sum(dist, axis=1)[:, tf.newaxis]
        elif self.abs_z_transform:
            abs_x = tf.abs(x)
            dist = abs_x / tf.reduce_sum(abs_x, axis=1)[:, tf.newaxis]
        elif self.sigsoftmax:
            # sigsoftmax: softmax weighted by sigmoid(x) (max subtracted for stability)
            e_x = tf.exp(x - tf.reduce_max(x, axis=1)[:, tf.newaxis])
            sig_e_x = e_x * tf.nn.sigmoid(x)
            dist = sig_e_x / tf.reduce_sum(sig_e_x, axis=1)[:, tf.newaxis]
        else:
            dist = tf.nn.softmax(x / tau)
        return x, dist
    def build_target_update(self):
        """Build an op that hard-copies encoder weights into the target encoder."""
        source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
        target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
        assert len(source_vars) == len(target_vars) and len(source_vars) > 0
        update_ops = []
        for source_var, target_var in zip(source_vars, target_vars):
            update_ops.append(tf.assign(target_var, source_var))
        self.target_update_op = tf.group(*update_ops)
    def build_summaries(self):
        """Register TensorBoard summaries for losses, activations and gradients."""
        # losses
        tf.summary.scalar("loss", self.loss_t)
        tf.summary.scalar("transition_loss", tf.reduce_mean(self.transition_loss_t))
        tf.summary.scalar("reward_loss", tf.reduce_mean(self.reward_loss_t))
        # logits and softmax
        agent_utils.summarize(self.state_logits_t, "logits")
        agent_utils.summarize(self.state_softmax_t, "softmax")
        # gradients of each loss w.r.t. logits and softmax (L1 norms)
        self.grad_norms_d = dict()
        for target, target_name in zip(
            [self.loss_t, self.transition_loss_t, self.reward_loss_t], ["total_loss", "t_loss", "r_loss"]
        ):
            for source, source_name in zip([self.state_logits_t, self.state_softmax_t], ["logits", "softmax"]):
                grads = tf.gradients(tf.reduce_mean(target), source)
                grad_norms = tf.norm(grads, ord=1, axis=-1)[0]
                name = "state_{}_grad_{}".format(source_name, target_name)
                self.grad_norms_d[name] = tf.reduce_mean(grad_norms)
                agent_utils.summarize(grad_norms, name)
    def set_gamma(self, value):
        # NOTE(review): creates a new assign op on every call (graph growth);
        # fine for occasional use
        self.session.run(tf.assign(self.gamma_v, value))
    def set_kl_penalty_weight(self, value):
        """Anneal the KL penalty weight at run time."""
        self.session.run(self.kl_penalty_weight_assign, feed_dict={self.kl_penalty_weight_pl: value})
    def start_session(self, gpu_memory=None):
        """Open a TF session (optionally capping GPU memory) and init variables."""
        gpu_options = None
        if gpu_memory is not None:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        self.session = tf.Session(config=tf_config)
        self.session.run(tf.global_variables_initializer())
    def stop_session(self):
        """Close the TF session if one is open."""
        if self.session is not None:
            self.session.close()
    def save_matrices_as_images(self, step, save_dir, ext="pdf"):
        """Save heat-map images of the reward matrix and transition softmax."""
        r, p = self.session.run([self.r_t, self.t_softmax_t])
        # flatten trailing dims so each matrix renders as a 2-D image
        r = np.reshape(r, (r.shape[0], -1))
        p = np.reshape(p, (p.shape[0], -1))
        r_path = os.path.join(save_dir, "r_{:d}.{}".format(step, ext))
        p_path = os.path.join(save_dir, "p_{:d}.{}".format(step, ext))
        plt.clf()
        plt.imshow(r, vmin=-0.5, vmax=1.5)
        plt.colorbar()
        plt.savefig(r_path)
        plt.clf()
        plt.imshow(p, vmin=0, vmax=1)
        plt.colorbar()
        plt.savefig(p_path)
class ExpectationModelGaussian(ExpectationModel):
    """Expectation model with a Gaussian (VAE-style) latent before the softmax.

    The encoder predicts a diagonal Gaussian (mu, sd); a reparameterized
    sample of it plays the role of the logits, and its temperature softmax
    is the latent distribution.  The KL penalty becomes the analytic KL to
    a standard normal prior.
    """
    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
                 target_network=True, oracle_r=None, oracle_t=None, propagate_next_state=False,
                 z_transform=False, abs_z_transform=False, sigsoftmax=False, encoder_tau=1.0, model_tau=1.0,
                 no_tau_target_encoder=False, kl_penalty=False, kl_penalty_weight=0.01, small_r_init=False,
                 small_t_init=False):
        """Forward all hyper-parameters to ExpectationModel unchanged."""
        super(ExpectationModelGaussian, self).__init__(
            input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
            encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma,
            optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
            oracle_r=oracle_r, oracle_t=oracle_t, propagate_next_state=propagate_next_state, z_transform=z_transform,
            abs_z_transform=abs_z_transform, sigsoftmax=sigsoftmax, encoder_tau=encoder_tau, model_tau=model_tau,
            no_tau_target_encoder=no_tau_target_encoder, kl_penalty=kl_penalty,
            kl_penalty_weight=kl_penalty_weight, small_r_init=small_r_init, small_t_init=small_t_init
        )
    def build_model(self):
        """Build Gaussian encoders for both states plus the reward/transition models."""
        self.state_mu_t, self.state_sd_t, self.state_var_t, self.state_logits_t, self.state_softmax_t = \
            self.build_encoder(
                self.states_pl, self.hand_states_pl, self.encoder_tau, namespace=self.ENCODER_NAMESPACE
            )
        # perplexity = 2 ** H(p) with base-2 entropy H (see ExpectationModel)
        self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
            - tf.reduce_mean(
                tf.reduce_sum(
                    self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
                    tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
                    axis=1
                ),
                axis=0
            )
        )
        if self.no_tau_target_encoder:
            target_tau = 1.0
        else:
            target_tau = self.encoder_tau
        if self.target_network:
            self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
                self.next_state_softmax_t = \
                self.build_encoder(
                    self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=False,
                    namespace=self.TARGET_ENCODER_NAMESPACE
                )
            self.build_target_update()
        else:
            self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
                self.next_state_softmax_t = \
                self.build_encoder(
                    self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=True,
                    namespace=self.ENCODER_NAMESPACE
                )
        self.build_r_model()
        self.build_t_model()
    def build_encoder(self, depth_pl, hand_state_pl, tau, share_weights=False, namespace=None):
        """Build the Gaussian encoder.

        Same conv/fc trunk as the base class, but the head predicts mu and
        log-variance; a reparameterized sample (mu + sd * eps) is passed
        through a temperature softmax.
        Returns (mu, sd, var, sample, distribution).
        """
        x = tf.expand_dims(depth_pl, axis=-1)
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            x = tf.concat([x, hand_state_pl], axis=1)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                mu_t = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
                # predict log variance for numerical stability, then exponentiate
                log_var_t = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        var_t = tf.exp(log_var_t)
        sd_t = tf.sqrt(var_t)
        # reparameterization trick: sample = mu + sd * eps, eps ~ N(0, 1)
        noise_t = tf.random_normal(
            shape=(tf.shape(mu_t)[0], self.num_blocks), mean=0, stddev=1.0
        )
        sample_t = mu_t + sd_t * noise_t
        dist_t = tf.nn.softmax(sample_t / tau)
        return mu_t, sd_t, var_t, sample_t, dist_t
    def build_kl_penalty(self):
        """Analytic KL divergence of the encoder Gaussian from N(0, I)."""
        self.kl_penalty_weight_v = tf.Variable(self.kl_penalty_weight, trainable=False, dtype=tf.float32)
        self.kl_penalty_weight_pl = tf.placeholder(tf.float32, shape=[], name="kl_penalty_weight_pl")
        self.kl_penalty_weight_assign = tf.assign(self.kl_penalty_weight_v, self.kl_penalty_weight_pl)
        if self.kl_penalty:
            # KL(N(mu, var) || N(0, 1)) = 0.5 * (mu^2 + var - log(var) - 1), per dim
            kl_divergence_t = 0.5 * (tf.square(self.state_mu_t) + self.state_var_t - tf.log(self.state_var_t) - 1.0)
            self.kl_penalty_t = tf.reduce_mean(tf.reduce_sum(kl_divergence_t, axis=1), axis=0)
        else:
            self.kl_penalty_t = 0.0
class ExpectationModelGaussianWithQ(ExpectationModelGaussian):
    """Gaussian expectation model that additionally learns per-action q-values.

    Adds a (num_actions, num_blocks) q-value matrix trained to match the
    supplied q-value targets in expectation over the latent state; the
    base-class `gamma` slot holds `transition_gamma`, while `reward_gamma`
    weights the reward loss in the total loss.
    """
    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, learning_rate_q,
                 weight_decay, reward_gamma, transition_gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
                 target_network=True, oracle_r=None, oracle_t=None, propagate_next_state=False,
                 z_transform=False, abs_z_transform=False, sigsoftmax=False, encoder_tau=1.0, model_tau=1.0,
                 no_tau_target_encoder=False, kl_penalty=False, kl_penalty_weight=0.01, small_r_init=False,
                 small_t_init=False, small_q_init=False):
        """Forward to the Gaussian model; transition_gamma takes the base gamma slot."""
        super(ExpectationModelGaussianWithQ, self).__init__(
            input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
            encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, transition_gamma,
            optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
            oracle_r=oracle_r, oracle_t=oracle_t, propagate_next_state=propagate_next_state, z_transform=z_transform,
            abs_z_transform=abs_z_transform, sigsoftmax=sigsoftmax, encoder_tau=encoder_tau, model_tau=model_tau,
            no_tau_target_encoder=no_tau_target_encoder, kl_penalty=kl_penalty, kl_penalty_weight=kl_penalty_weight,
            small_r_init=small_r_init, small_t_init=small_t_init
        )
        self.learning_rate_q = learning_rate_q
        self.small_q_init = small_q_init
        self.reward_gamma = reward_gamma
    def validate(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
                 batch_size=100):
        """Per-sample (q, transition, reward) losses; shape (num_samples, 3)."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.q_values_pl: q_values[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            l1, l2, l3 = self.session.run(
                [self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
        losses = np.concatenate(losses, axis=0)
        return losses
    def validate_and_encode(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
                            batch_size=100):
        """Like `validate`, but also returns the latent softmax embeddings."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.q_values_pl: q_values[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            tmp_embeddings, l1, l2, l3 = self.session.run([
                self.state_softmax_t, self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t],
                feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)
        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)
        return losses, embeddings
    def build_placeholders(self):
        """Base placeholders plus a (batch, num_actions) q-value target."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.q_values_pl = tf.placeholder(tf.float32, shape=(None, self.num_actions), name="q_values_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
    def build_model(self):
        """Gaussian encoders plus reward, transition and q-value models."""
        self.state_mu_t, self.state_sd_t, self.state_var_t, self.state_logits_t, self.state_softmax_t = \
            self.build_encoder(
                self.states_pl, self.hand_states_pl, self.encoder_tau, namespace=self.ENCODER_NAMESPACE
            )
        # perplexity = 2 ** H(p) with base-2 entropy H (see ExpectationModel)
        self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
            - tf.reduce_mean(
                tf.reduce_sum(
                    self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
                    tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
                    axis=1
                ),
                axis=0
            )
        )
        if self.no_tau_target_encoder:
            target_tau = 1.0
        else:
            target_tau = self.encoder_tau
        if self.target_network:
            self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
                self.next_state_softmax_t = \
                self.build_encoder(
                    self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=False,
                    namespace=self.TARGET_ENCODER_NAMESPACE
                )
            self.build_target_update()
        else:
            self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
                self.next_state_softmax_t = \
                self.build_encoder(
                    self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=True,
                    namespace=self.ENCODER_NAMESPACE
                )
        self.build_r_model()
        self.build_t_model()
        self.build_q_model()
    def build_q_model(self):
        """Create the learnable q-value matrix (num_actions, num_blocks)."""
        if self.small_q_init:
            q_init = tf.random_normal_initializer(mean=0, stddev=0.1, dtype=tf.float32)
        else:
            q_init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        self.q_v = tf.get_variable(
            "q_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=q_init
        )
        self.q_t = self.q_v
    def build_training(self):
        """Total loss = q + reward_gamma * reward + gamma * transition (+ reg, KL)."""
        # prep
        self.global_step = tf.train.get_or_create_global_step()
        self.gather_matrices()
        self.dones_float_t = tf.cast(self.dones_pl, tf.float32)
        # build losses
        self.build_q_loss()
        self.build_reward_loss()
        self.build_transition_loss()
        self.build_regularization_loss()
        self.build_kl_penalty()
        # build full loss; gamma_v holds transition_gamma (set in __init__)
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
        self.loss_t = self.q_loss_t + self.reward_gamma * self.reward_loss_t + \
            tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
            self.regularization_loss_t + self.kl_penalty_weight_v * self.kl_penalty_t
        # build training
        self.build_encoder_training()
        self.build_model_training()
        self.build_q_model_training()
        if self.model_train_step is None:
            self.model_train_step = self.q_step
        else:
            self.model_train_step = tf.group(self.model_train_step, self.q_step)
        # integrate training into a single op
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
    def build_q_loss(self):
        """Expected squared q-value error under the latent distribution (x 1/2).

        Squared errors are summed over actions first, then weighted by the
        latent softmax and summed over blocks.
        """
        term1 = tf.square(self.q_values_pl[:, :, tf.newaxis] - self.q_t[tf.newaxis, :, :])
        term1 = tf.reduce_sum(term1, axis=1)
        term2 = term1 * self.state_softmax_t
        self.full_q_loss_t = (1 / 2) * tf.reduce_sum(term2, axis=1)
        self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
    def build_q_model_training(self):
        """Minimize the q loss w.r.t. the q matrix only."""
        q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
        self.q_step = q_optimizer.minimize(
            self.q_loss_t, var_list=[self.q_v]
        )
class ExpectationModelContinuous:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
                 target_network=True, propagate_next_state=False, no_sample=False, softplus=False,
                 beta=0.0, zero_embedding_variance=False, old_bn_settings=False, bn_momentum=0.99):
        """Store hyper-parameters; no graph is built here.

        :param num_blocks:      dimensionality of the continuous latent embedding.
        :param no_sample:       use the mean directly instead of sampling.
        :param softplus:        parameterize sd via softplus.  # presumably — TODO confirm in build_encoder
        :param beta:            KL weight (beta-VAE style).  # presumably — TODO confirm in loss code
        :param zero_embedding_variance: force the embedding variance to zero.
        """
        # backprop through the next-state encoding requires a shared (non-target) encoder
        if propagate_next_state:
            assert not target_network
        self.input_shape = input_shape
        self.num_blocks = num_blocks
        self.num_actions = num_actions
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.learning_rate_encoder = learning_rate_encoder
        self.learning_rate_r = learning_rate_r
        self.learning_rate_t = learning_rate_t
        self.weight_decay = weight_decay
        self.gamma = gamma
        self.optimizer_encoder = optimizer_encoder
        self.optimizer_model = optimizer_model
        self.max_steps = max_steps
        self.batch_norm = batch_norm
        self.target_network = target_network
        self.propagate_next_state = propagate_next_state
        self.no_sample = no_sample
        self.softplus = softplus
        self.beta = beta
        self.zero_embedding_variance = zero_embedding_variance
        self.old_bn_settings = old_bn_settings
        self.bn_momentum = bn_momentum
        # placeholders are created later during graph construction
        self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, depths, hand_states, batch_size=100, zero_sd=False):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.is_training_pl: False
}
if zero_sd:
feed_dict[self.state_sd_t] = np.zeros(
(len(depths[batch_slice]), self.num_blocks), dtype=np.float32
)
embedding = self.session.run(self.state_mu_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
    def predict_next_states(self, depths, hand_states, actions, batch_size=100, zero_sd=False):
        """Predict next latent states for (state, action) pairs, in mini-batches.

        With zero_sd=True the encoder standard deviation is overridden with
        zeros so the prediction uses the deterministic embedding.
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        next_states = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[batch_slice],
                self.actions_pl: actions[batch_slice],
                self.is_training_pl: False
            }
            if zero_sd:
                feed_dict[self.state_sd_t] = np.zeros(
                    (len(depths[batch_slice]), self.num_blocks), dtype=np.float32
                )
            tmp_next_states = self.session.run(self.transformed_logits, feed_dict=feed_dict)
            next_states.append(tmp_next_states)
        next_states = np.concatenate(next_states, axis=0)
        return next_states
def predict_rewards(self, depths, hand_states, actions, batch_size=100, zero_sd=False):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
rewards = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.actions_pl: actions[batch_slice],
self.is_training_pl: False
}
if zero_sd:
feed_dict[self.state_sd_t] = np.zeros(
(len(depths[batch_slice]), self.num_blocks), dtype=np.float32
)
tmp_rewards = self.session.run(self.reward_prediction_t, feed_dict=feed_dict)
rewards.append(tmp_rewards)
rewards = np.concatenate(rewards, axis=0)
return rewards
def validate(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2 = self.session.run([self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2 = self.session.run([
self.state_mu_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
    def build(self):
        """Construct the full TF graph: placeholders, model, training ops and a checkpoint saver."""
        self.build_placeholders()
        self.build_model()
        self.build_training()
        self.saver = tf.train.Saver()
    def build_placeholders(self):
        """Create input placeholders for states, actions, rewards, terminals and the BN training flag."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        # scalar flag toggling batch norm between train and inference mode
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
    def build_model(self):
        """Build the state/next-state encoders and the per-action reward and transition parameters."""
        self.state_mu_t, self.state_var_t, self.state_sd_t, self.state_sample_t = \
            self.build_encoder(self.states_pl, self.hand_states_pl)

        if self.target_network:
            # separate target encoder for next states, synced via build_target_update
            self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=False,
                namespace=self.TARGET_ENCODER_NAMESPACE
            )
            self.build_target_update()
        else:
            # reuse the same encoder weights for next states
            self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=True
            )

        # per-action reward vectors (num_actions x d) and transition matrices (num_actions x d x d)
        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
    def build_training(self):
        """Assemble the combined loss and the encoder/model optimization steps."""
        # create global training step variable
        self.global_step = tf.train.get_or_create_global_step()

        self.float_dones_t = tf.cast(self.dones_pl, tf.float32)

        # gather appropriate transition matrices
        self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
        self.gather_t_t = tf.gather(self.t_v, self.actions_pl)

        # build losses
        self.build_reward_loss()
        self.build_transition_loss()
        self.build_weight_decay_loss()
        self.build_kl_loss()

        # build the whole loss; gamma weighs the transition loss and is kept out of the gradient
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
        self.loss_t = self.reward_loss_t + tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
            self.regularization_loss_t + self.beta * self.kl_loss_t

        # build training: encoder step plus separate steps for the reward and transition matrices
        self.build_encoder_training()
        self.build_r_model_training()
        self.build_t_model_training()

        self.model_train_step = tf.group(self.r_step, self.t_step)
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
def build_reward_loss(self):
self.reward_prediction_t = tf.reduce_sum(self.state_sample_t * self.gather_r_t, axis=1)
term1 = tf.square(
self.rewards_pl - self.reward_prediction_t
)
self.full_reward_loss_t = (1 / 2) * term1
self.reward_loss_t = tf.reduce_mean(self.full_reward_loss_t, axis=0)
    def build_transition_loss(self):
        """Penalize the squared error between T_a applied to the state and the next-state sample.

        Terminal transitions are masked out and the loss is averaged over non-terminal
        samples only.
        """
        # s^T T_a as a batched (1 x d) x (d x d) matmul, then squeeze the singleton axis
        self.transformed_logits = tf.matmul(self.state_sample_t[:, tf.newaxis, :], self.gather_t_t)
        self.transformed_logits = self.transformed_logits[:, 0, :]

        if self.propagate_next_state:
            term1 = tf.reduce_sum(tf.square(self.next_state_sample_t - self.transformed_logits), axis=1)
        else:
            # default: do not backpropagate through the next-state encoder
            term1 = tf.reduce_sum(tf.square(tf.stop_gradient(self.next_state_sample_t) - self.transformed_logits),
                                  axis=1)

        self.full_transition_loss_t = (1 / 2) * term1 * (1 - self.float_dones_t)
        # mean over non-terminal transitions; the max with 1.0 guards against division by zero
        self.transition_loss_t = tf.reduce_sum(self.full_transition_loss_t, axis=0) / tf.reduce_max(
            [1.0, tf.reduce_sum(1 - self.float_dones_t)])
def build_weight_decay_loss(self):
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0
    def build_kl_loss(self):
        """KL divergence between the encoder posterior N(mu, var) and a standard normal prior.

        Remains the constant 0 unless beta is set to a positive value.
        """
        self.kl_loss_t = 0.0

        if self.beta is not None and self.beta > 0.0:
            # closed-form KL(N(mu, var) || N(0, I)); the 1e-5 guards the log at var == 0
            self.kl_divergence_t = 0.5 * (
                tf.square(self.state_mu_t) + self.state_var_t - tf.log(self.state_var_t + 1e-5) - 1.0)
            self.kl_loss_t = tf.reduce_mean(tf.reduce_sum(self.kl_divergence_t, axis=1))
    def build_encoder_training(self):
        """Minimize the full loss w.r.t. encoder variables, grouping in BN moving-average updates."""
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
        self.encoder_train_step = encoder_optimizer.minimize(
            self.loss_t, global_step=self.global_step, var_list=encoder_variables
        )

        if self.batch_norm:
            # run batch norm statistics updates together with the encoder step
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
    def build_r_model_training(self):
        """Minimize the reward loss w.r.t. the reward matrix only."""
        r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
        self.r_step = r_optimizer.minimize(
            self.reward_loss_t, var_list=[self.r_v]
        )
    def build_t_model_training(self):
        """Minimize the transition loss w.r.t. the transition matrix only."""
        t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
        self.t_step = t_optimizer.minimize(
            self.transition_loss_t, var_list=[self.t_v]
        )
def build_encoder(self, depth_pl, hand_state_pl, share_weights=False, namespace=ENCODER_NAMESPACE):
if len(depth_pl.shape) == 3:
x = tf.expand_dims(depth_pl, axis=-1)
elif len(depth_pl.shape) != 4:
raise ValueError("Weird depth shape?")
else:
x = depth_pl
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
if self.old_bn_settings:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl, trainable=not share_weights, momentum=self.bn_momentum
)
else:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl if not share_weights else False,
trainable=not share_weights, momentum=self.bn_momentum
)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
if self.old_bn_settings:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl, trainable=not share_weights, momentum=self.bn_momentum
)
else:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl if not share_weights else False,
trainable=not share_weights, momentum=self.bn_momentum
)
x = tf.nn.relu(x)
x = tf.concat([x, hand_state_pl], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
if self.old_bn_settings:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl, trainable=not share_weights, momentum=self.bn_momentum
)
else:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl if not share_weights else False,
trainable=not share_weights, momentum=self.bn_momentum
)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
mu = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
sigma = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
if self.no_sample:
sample = mu
var_t = None
else:
noise = tf.random_normal(
shape=(tf.shape(mu)[0], self.num_blocks), mean=0, stddev=1.0
)
if self.softplus:
# log var is sd
sd = tf.nn.softplus(sigma)
if self.zero_embedding_variance:
sd = sd * 0.0
sd_noise_t = noise * sd
sample = mu + sd_noise_t
var_t = tf.square(sd)
else:
var_t = tf.exp(sigma)
if self.zero_embedding_variance:
var_t = var_t * 0.0
sd = tf.sqrt(var_t)
sd_noise_t = noise * sd
sample = mu + sd_noise_t
return mu, var_t, sd, sample
def build_target_update(self):
source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
assert len(source_vars) == len(target_vars) and len(source_vars) > 0
update_ops = []
for source_var, target_var in zip(source_vars, target_vars):
update_ops.append(tf.assign(target_var, source_var))
self.target_update_op = tf.group(*update_ops)
    def build_summaries(self):
        """Register TensorBoard summaries for losses, encoder outputs and the model matrices."""
        # losses
        tf.summary.scalar("loss", self.loss_t)
        tf.summary.scalar("transition_loss", tf.reduce_mean(self.transition_loss_t))
        tf.summary.scalar("reward_loss", tf.reduce_mean(self.reward_loss_t))

        # encoder outputs: latent means, variances and samples
        self.summarize(self.state_mu_t, "means")
        self.summarize(self.state_var_t, "vars")
        self.summarize(self.state_sample_t, "samples")

        # reward and transition matrices
        self.summarize(self.r_v, "R")
        self.summarize(self.t_v, "T")
def set_gamma(self, value):
self.session.run(tf.assign(self.gamma_v, value))
    def start_session(self, gpu_memory=None):
        """Create a TF session and initialize all global variables.

        :param gpu_memory: fraction of GPU memory to allocate, or None for the default.
        """
        gpu_options = None
        if gpu_memory is not None:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)

        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        self.session = tf.Session(config=tf_config)
        self.session.run(tf.global_variables_initializer())
    def stop_session(self):
        """Close the TF session if one was started."""
        if self.session is not None:
            self.session.close()
    def load(self, path):
        """Restore model variables from a checkpoint.

        :param path: checkpoint path prefix previously passed to save.
        """
        self.saver.restore(self.session, path)
def save(self, path):
path_dir = os.path.dirname(path)
if len(path_dir) > 0 and not os.path.isdir(path_dir):
os.makedirs(path_dir)
self.saver.save(self.session, path)
    def save_matrices_as_images(self, step, save_dir, ext="pdf"):
        """Render the reward and transition matrices as heat maps and save them to disk.

        :param step: training step, embedded in the output file names.
        :param save_dir: output directory; assumed to exist — TODO confirm callers create it.
        :param ext: image file extension/format.
        """
        r, p = self.session.run([self.r_v, self.t_v])
        # flatten the per-action matrices into 2D images (one row per action)
        r = np.reshape(r, (r.shape[0], -1))
        p = np.reshape(p, (p.shape[0], -1))

        r_path = os.path.join(save_dir, "r_{:d}.{}".format(step, ext))
        p_path = os.path.join(save_dir, "p_{:d}.{}".format(step, ext))

        plt.clf()
        plt.imshow(r, vmin=-0.5, vmax=1.5)
        plt.colorbar()
        plt.savefig(r_path)

        plt.clf()
        plt.imshow(p, vmin=0, vmax=1)
        plt.colorbar()
        plt.savefig(p_path)
def summarize(self, var, name):
tf.summary.scalar(name + "_mean", tf.reduce_mean(var))
tf.summary.scalar(name + "_min", tf.reduce_min(var))
tf.summary.scalar(name + "_max", tf.reduce_max(var))
tf.summary.histogram(name + "_hist", var)
class ExpectationModelVQ(ExpectationModelContinuous):
    """Expectation model with a VQ-VAE style quantized latent space.

    The encoder output is snapped to the nearest of num_embeddings learned codebook
    vectors (see quantize); linear reward and transition models operate on the quantized
    codes, and the codebook is trained with the usual VQ codebook/commitment losses.
    """

    def __init__(self, input_shape, num_embeddings, dimensionality, num_actions, encoder_filters,
                 encoder_filter_sizes, encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r,
                 learning_rate_t, weight_decay, gamma_1, optimizer_encoder, optimizer_model,
                 max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
                 softplus=False, alpha=0.1, beta=0.0):
        """See ExpectationModelContinuous for the shared parameters.

        :param num_embeddings: number of vectors in the codebook.
        :param dimensionality: size of each codebook vector (the latent dimensionality).
        :param alpha: weight of the commitment (right) loss in the encoder objective.
        """

        ExpectationModelContinuous.__init__(
            self, input_shape, dimensionality, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
            encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma_1,
            optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
            propagate_next_state=propagate_next_state, no_sample=no_sample, softplus=softplus, beta=beta
        )

        self.num_embeddings = num_embeddings
        self.dimensionality = dimensionality
        # bug fix: the attribute was misspelled "aplha"; the old name is kept as an alias
        # so any external code reading model.aplha keeps working
        self.alpha = alpha
        self.aplha = alpha

    def build_model(self):
        """Build the encoder(s), the codebook, quantization ops and the linear reward/transition models."""
        self.state_mu_t = \
            self.build_encoder(self.states_pl, self.hand_states_pl, namespace=self.ENCODER_NAMESPACE)

        self.embeds = tf.get_variable(
            "embeddings", [self.num_embeddings, self.dimensionality],
            initializer=tf.truncated_normal_initializer(stddev=0.02)
        )

        # snap the continuous encoder output to the nearest codebook vector
        self.state_sample_t, self.state_classes_t = self.quantize(self.state_mu_t)

        if self.target_network:
            self.next_state_mu_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=False,
                namespace=self.TARGET_ENCODER_NAMESPACE
            )
            self.build_target_update()
        else:
            self.next_state_mu_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.ENCODER_NAMESPACE
            )

        self.next_state_sample_t, self.next_state_classes_t = self.quantize(self.next_state_mu_t)

        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
        )
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.dimensionality, self.dimensionality), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
        )

    def quantize(self, prediction):
        """Snap each latent vector to its nearest (L2) codebook vector.

        :param prediction: tensor of shape (batch, dimensionality).
        :return: tuple (quantized vectors, codebook indices).
        """
        diff = prediction[:, tf.newaxis, :] - self.embeds[tf.newaxis, :, :]
        norm = tf.norm(diff, axis=2)
        classes = tf.argmin(norm, axis=1)
        return tf.gather(self.embeds, classes), classes

    def build_training(self):
        """Assemble the VQ losses and training ops.

        Unlike the parent class, no KL term is built; the VQ codebook/commitment
        losses regularize the latent space instead.
        """
        # create global training step variable
        self.global_step = tf.train.get_or_create_global_step()

        self.float_dones_t = tf.cast(self.dones_pl, tf.float32)

        # gather appropriate transition matrices
        self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
        self.gather_t_t = tf.gather(self.t_v, self.actions_pl)

        # build losses
        self.build_reward_loss()
        self.build_transition_loss()
        self.build_left_and_right_embedding_losses()
        self.build_weight_decay_loss()

        # build the whole loss; the commitment (right) loss is added for the encoder
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
        self.main_loss_t = self.reward_loss_t + tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
            self.regularization_loss_t
        self.loss_t = self.main_loss_t + self.right_loss_t

        # build training
        self.build_encoder_and_embedding_training()
        self.build_r_model_training()
        self.build_t_model_training()

        self.model_train_step = tf.group(self.r_step, self.t_step)
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)

    def build_left_and_right_embedding_losses(self):
        """Codebook (left) loss pulls embeddings towards encoder outputs; commitment (right)
        loss pulls encoder outputs towards their assigned embeddings."""
        self.left_loss_t = tf.reduce_mean(
            tf.norm(tf.stop_gradient(self.state_mu_t) - self.state_sample_t, axis=1) ** 2
        )
        self.right_loss_t = tf.reduce_mean(
            tf.norm(self.state_mu_t - tf.stop_gradient(self.state_sample_t), axis=1) ** 2
        )

    def build_reward_loss(self):
        """Squared-error reward loss on the quantized latent state (per sample and batch mean)."""
        self.reward_prediction_t = tf.reduce_sum(self.state_sample_t * self.gather_r_t, axis=1)

        term1 = tf.square(
            self.rewards_pl - self.reward_prediction_t
        )

        self.full_reward_loss_t = (1 / 2) * term1
        self.reward_loss_t = tf.reduce_mean(self.full_reward_loss_t, axis=0)

    def build_transition_loss(self):
        """Squared-error transition loss on quantized states; terminals are masked out.

        Note: averages over the latent dimensions (reduce_mean), whereas the parent
        class sums over them.
        """
        self.transformed_logits = tf.matmul(self.state_sample_t[:, tf.newaxis, :], self.gather_t_t)
        self.transformed_logits = self.transformed_logits[:, 0, :]

        if self.propagate_next_state:
            term1 = tf.reduce_mean(tf.square(self.next_state_sample_t - self.transformed_logits), axis=1)
        else:
            term1 = tf.reduce_mean(tf.square(tf.stop_gradient(self.next_state_sample_t) - self.transformed_logits),
                                   axis=1)

        self.full_transition_loss_t = (1 / 2) * term1 * (1 - self.float_dones_t)
        self.transition_loss_t = tf.reduce_sum(self.full_transition_loss_t, axis=0) / tf.reduce_max(
            [1.0, tf.reduce_sum(1 - self.float_dones_t)])

    def build_encoder_and_embedding_training(self):
        """Straight-through training: gradients of the main loss w.r.t. the quantized sample are
        pushed through the encoder output, plus alpha times the commitment loss; the codebook
        is trained on the left loss only."""
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)

        grad_z = tf.gradients(self.main_loss_t, self.state_sample_t)

        encoder_grads = [(tf.gradients(self.state_mu_t, var, grad_z)[0] + self.alpha *
                          tf.gradients(self.right_loss_t, var)[0], var) for var in encoder_variables]
        embed_grads = list(zip(tf.gradients(self.left_loss_t, self.embeds), [self.embeds]))

        self.encoder_train_step = encoder_optimizer.apply_gradients(
            encoder_grads + embed_grads
        )

        if self.batch_norm:
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)

    def build_encoder(self, depth_pl, hand_state_pl, share_weights=False, namespace=None):
        """Deterministic convolutional encoder; returns the latent means only.

        Generalized to accept rank-3 (a channel axis is added) or rank-4 depth input,
        matching the parent class; previously the rank-4 case was not handled.
        """
        if len(depth_pl.shape) == 3:
            x = tf.expand_dims(depth_pl, axis=-1)
        elif len(depth_pl.shape) != 4:
            raise ValueError("Weird depth shape?")
        else:
            x = depth_pl

        with tf.variable_scope(namespace, reuse=share_weights):

            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )

                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)

            x = tf.layers.flatten(x)

            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)

            x = tf.concat([x, hand_state_pl], axis=1)

            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )

                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)

            with tf.variable_scope("predict"):
                # note: self.num_blocks == self.dimensionality here (set in __init__)
                mu = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )

        return mu
class ExpectationModelContinuousNNTransitions(ExpectationModelContinuous):
    """Continuous expectation model variant with a neural-network transition function.

    NOTE(review): appears unfinished — build_model still creates the linear transition
    matrix t_v and build_transition_nn is not wired into the graph (see TODOs below).
    """

    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 transition_neurons, weight_decay, gamma_1, optimizer_encoder, optimizer_model,
                 max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
                 softplus=False, beta=0.0):
        """See ExpectationModelContinuous.

        :param transition_neurons: layer sizes of the transition MLP; the last entry must
            equal the embedding size so the assertion in build_transition_nn holds.
        """
        ExpectationModelContinuous.__init__(
            self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
            encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma_1,
            optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
            propagate_next_state=propagate_next_state, no_sample=no_sample, softplus=softplus, beta=beta
        )

        self.transition_neurons = transition_neurons

    def build_model(self):
        """Build encoders and model parameters; currently identical to the parent implementation."""
        # TODO: throw out t_v; either accept mu_t or sample_t
        self.state_mu_t, self.state_var_t, self.state_sd_t, self.state_sample_t = \
            self.build_encoder(self.states_pl, self.hand_states_pl)

        if self.target_network:
            self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=False,
                namespace=self.TARGET_ENCODER_NAMESPACE
            )
            self.build_target_update()
        else:
            self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=True
            )

        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )

    def build_transition_nn(self, embedding):
        """MLP that maps a latent embedding to a predicted next embedding of the same size.

        :param embedding: latent state tensor of shape (batch, embedding size).
        :return: predicted next embedding, same shape as the input.
        """
        # TODO: needs to accept actions as well
        x = embedding

        with tf.variable_scope("transition"):

            for idx, neurons in enumerate(self.transition_neurons):
                with tf.variable_scope("fc{:d}".format(idx)):

                    if idx == len(self.transition_neurons) - 1:
                        # final layer: linear, no batch norm
                        x = tf.layers.dense(
                            x, neurons, activation=None,
                            kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                            kernel_initializer=agent_utils.get_mrsa_initializer()
                        )
                    else:
                        x = tf.layers.dense(
                            x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                            kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                            kernel_initializer=agent_utils.get_mrsa_initializer(),
                            use_bias=not self.batch_norm
                        )

                        if self.batch_norm:
                            x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                            x = tf.nn.relu(x)

        # the output must live in the same latent space as the input
        assert embedding.shape[1] == x.shape[1]

        return x
class ExpectationModelContinuousWithQ(ExpectationModelContinuous):
    """Continuous expectation model that additionally fits a linear Q-value head on the latent state."""

    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 learning_rate_q, weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model,
                 max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
                 softplus=False, beta=0.0, old_bn_settings=False, bn_momentum=0.99):
        """See ExpectationModelContinuous.

        :param learning_rate_q: learning rate for the Q-value matrix.
        :param gamma_2: weight of the reward loss relative to the Q-value loss.
        """
        ExpectationModelContinuous.__init__(
            self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
            encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma_1,
            optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
            propagate_next_state=propagate_next_state, no_sample=no_sample, softplus=softplus, beta=beta,
            old_bn_settings=old_bn_settings, bn_momentum=bn_momentum
        )

        self.gamma_2 = gamma_2
        self.learning_rate_q = learning_rate_q

    def validate(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
                 batch_size=100):
        """Compute per-sample losses over a dataset.

        :return: array of shape (num_samples, 3) with columns
            [q_loss, transition_loss, reward_loss].
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []

        for i in range(num_steps):

            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]

            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.q_values_pl: q_values[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }

            l1, l2, l3 = self.session.run(
                [self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))

        losses = np.concatenate(losses, axis=0)

        return losses

    def validate_and_encode(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
                            batch_size=100):
        """Like validate, but additionally return the latent means for every sample.

        :return: tuple (losses, embeddings); losses has columns
            [q_loss, transition_loss, reward_loss].
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []

        for i in range(num_steps):

            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]

            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.q_values_pl: q_values[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }

            tmp_embeddings, l1, l2, l3 = self.session.run([
                self.state_mu_t, self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t],
                feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)

        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)

        return losses, embeddings

    def build_placeholders(self):
        """Create the parent placeholders plus a placeholder for target Q-values."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.q_values_pl = tf.placeholder(tf.float32, shape=(None, self.num_actions), name="q_values_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")

    def build_model(self):
        """Build the encoders and the linear Q/reward/transition models."""
        self.build_encoders()
        self.build_linear_models()

    def build_encoders(self):
        """Build the state encoder and the (target or shared) next-state encoder."""
        self.state_mu_t, self.state_var_t, self.state_sd_t, self.state_sample_t = \
            self.build_encoder(self.states_pl, self.hand_states_pl)

        if self.target_network:
            self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=False,
                namespace=self.TARGET_ENCODER_NAMESPACE
            )
            self.build_target_update()
        else:
            self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=True
            )

    def build_linear_models(self):
        """Create the per-action Q-value, reward and transition parameters."""
        self.q_v = tf.get_variable(
            "q_values_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )

    def build_training(self):
        """Assemble the combined loss (Q + weighted reward + transition + reg + KL) and training ops."""
        # create global training step variable
        self.global_step = tf.train.get_or_create_global_step()

        self.float_dones_t = tf.cast(self.dones_pl, tf.float32)

        # gather appropriate transition matrices
        self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
        self.gather_t_t = tf.gather(self.t_v, self.actions_pl)

        # build losses
        self.build_q_loss()
        self.build_reward_loss()
        self.build_transition_loss()
        self.build_weight_decay_loss()
        self.build_kl_loss()

        # build the whole loss; gamma_2 weighs the reward loss against the Q-value loss
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
        self.loss_t = self.q_loss_t + self.gamma_2 * self.reward_loss_t + \
            tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
            self.regularization_loss_t + self.beta * self.kl_loss_t

        # build training
        self.build_encoder_training()
        self.build_q_model_training()
        self.build_r_model_training()
        self.build_t_model_training()

        self.model_train_step = tf.group(self.q_step, self.r_step, self.t_step)
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)

    def build_q_loss(self):
        """Squared error between target Q-values and <state sample, q_a> for every action."""
        self.q_prediction_t = tf.reduce_sum(self.state_sample_t[:, tf.newaxis, :] * self.q_v[tf.newaxis, :, :], axis=2)

        term1 = tf.reduce_mean(tf.square(self.q_values_pl - self.q_prediction_t), axis=1)

        self.full_q_loss_t = (1 / 2) * term1
        self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)

    def build_q_model_training(self):
        """Minimize the Q-value loss w.r.t. the Q-value matrix only."""
        q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
        self.q_step = q_optimizer.minimize(
            self.q_loss_t, var_list=[self.q_v]
        )
class ExpectationModelVQWithQ(ExpectationModelVQ):
    def __init__(self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 learning_rate_q, weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model,
                 max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
                 softplus=False, alpha=0.1, beta=0.0):
        """VQ expectation model extended with Q-value prediction.

        :param num_blocks: forwarded to ExpectationModelVQ as num_embeddings (codebook size).
        :param learning_rate_q: learning rate for the Q-value model.
        :param gamma_2: presumably weighs the reward loss against the Q-value loss, as in
            ExpectationModelContinuousWithQ — confirm in this class's build_training.
        """
        ExpectationModelVQ.__init__(
            self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
            encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay,
            gamma_1, optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm,
            target_network=target_network, propagate_next_state=propagate_next_state, no_sample=no_sample,
            softplus=softplus, alpha=alpha, beta=beta
        )

        self.gamma_2 = gamma_2
        self.learning_rate_q = learning_rate_q
def validate(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2, l3 = self.session.run(
[self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2, l3 = self.session.run([
self.state_mu_t, self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t],
feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.q_values_pl = tf.placeholder(tf.float32, shape=(None, self.num_actions), name="q_values_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.state_mu_t = \
self.build_encoder(self.states_pl, self.hand_states_pl, namespace=self.ENCODER_NAMESPACE)
self.embeds = tf.get_variable(
"embeddings", [self.num_embeddings, self.dimensionality],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
self.state_sample_t, self.state_classes_t = self.quantize(self.state_mu_t)
if self.target_network:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.ENCODER_NAMESPACE
)
self.next_state_sample_t, self.next_state_classes_t = self.quantize(self.next_state_mu_t)
self.q_v = tf.get_variable(
"q_values_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.dimensionality, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
def build_training(self):
# create global training step variable
self.global_step = tf.train.get_or_create_global_step()
self.float_dones_t = tf.cast(self.dones_pl, tf.float32)
# gather appropriate transition matrices
self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
self.gather_t_t = tf.gather(self.t_v, self.actions_pl)
# build losses
self.build_q_loss()
self.build_reward_loss()
self.build_transition_loss()
self.build_left_and_right_embedding_losses()
self.build_weight_decay_loss()
# build the whole loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.main_loss_t = self.q_loss_t + self.gamma_2 * self.reward_loss_t + \
tf.stop_gradient(self.gamma_v) * self.transition_loss_t + self.regularization_loss_t
self.loss_t = self.main_loss_t + self.right_loss_t
# build training
self.build_encoder_and_embedding_training()
self.build_q_model_training()
self.build_r_model_training()
self.build_t_model_training()
self.model_train_step = tf.group(self.q_step, self.r_step, self.t_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
def build_q_loss(self):
self.q_prediction_t = tf.reduce_sum(self.state_sample_t[:, tf.newaxis, :] * self.q_v[tf.newaxis, :, :], axis=2)
term1 = tf.reduce_mean(tf.square(self.q_values_pl - self.q_prediction_t), axis=1)
self.full_q_loss_t = (1 / 2) * term1
self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
def build_q_model_training(self):
q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
self.q_step = q_optimizer.minimize(
self.q_loss_t, var_list=[self.q_v]
)
class ExpectationModelVQWithQNeural(ExpectationModelVQWithQ):
    """VQ expectation model whose Q-value head is a small MLP instead of a linear map.

    Overrides build_model to drop the linear ``q_v`` matrix and instead
    predicts Q-values with an MLP (``build_q_model``) built under the
    "q_model" variable scope; ``q_neurons`` gives the layer widths and its
    last entry must equal ``num_actions``.
    """
    def __init__(self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, q_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 learning_rate_q, weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model,
                 max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
                 softplus=False, alpha=0.1, beta=0.0):
        """Forward shared hyperparameters to the parent and store the MLP widths."""
        ExpectationModelVQWithQ.__init__(
            self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
            encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, learning_rate_q,
            weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm,
            target_network=target_network, propagate_next_state=propagate_next_state, no_sample=no_sample,
            softplus=softplus, alpha=alpha, beta=beta
        )
        self.q_neurons = q_neurons
    def build_model(self):
        """Build encoder(s), VQ codebook and the r/t model variables (no linear q_v)."""
        self.state_mu_t = \
            self.build_encoder(self.states_pl, self.hand_states_pl, namespace=self.ENCODER_NAMESPACE)
        self.embeds = tf.get_variable(
            "embeddings", [self.num_embeddings, self.dimensionality],
            initializer=tf.truncated_normal_initializer(stddev=0.02)
        )
        self.state_sample_t, self.state_classes_t = self.quantize(self.state_mu_t)
        if self.target_network:
            # separate (periodically synced) encoder for next states
            self.next_state_mu_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=False,
                namespace=self.TARGET_ENCODER_NAMESPACE
            )
            self.build_target_update()
        else:
            self.next_state_mu_t = self.build_encoder(
                self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.ENCODER_NAMESPACE
            )
        self.next_state_sample_t, self.next_state_classes_t = self.quantize(self.next_state_mu_t)
        # linear reward model per action
        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
        )
        # linear latent transition model per action
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.dimensionality, self.dimensionality), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
        )
    def build_q_loss(self):
        """Mean squared error between the MLP's Q predictions and the targets."""
        self.q_prediction_t = self.build_q_model(self.state_sample_t)
        term1 = tf.reduce_mean(tf.square(self.q_values_pl - self.q_prediction_t), axis=1)
        self.full_q_loss_t = (1 / 2) * term1
        self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
    def build_q_model_training(self):
        """Create the optimizer step that trains only the Q-model MLP weights.

        Note: previously this collected GLOBAL_VARIABLES under "q_model",
        which also includes the non-trainable batch-norm moving statistics;
        passing those to minimize() risks gradient-updating them. Only the
        trainable variables belong in var_list.
        """
        q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
        self.q_step = q_optimizer.minimize(
            self.q_loss_t, var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="q_model")
        )
    def build_q_model(self, embedding):
        """MLP from the quantized embedding to per-action Q-values.

        All hidden layers use ReLU (after batch norm when enabled); the
        final layer is linear so the last entry of q_neurons is the output
        width.
        """
        x = embedding
        with tf.variable_scope("q_model"):
            for idx, neurons in enumerate(self.q_neurons):
                with tf.variable_scope("fc{:d}".format(idx)):
                    if idx == len(self.q_neurons) - 1:
                        # output layer: no activation, no batch norm
                        x = tf.layers.dense(
                            x, neurons, activation=None,
                            kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                            kernel_initializer=agent_utils.get_mrsa_initializer()
                        )
                    else:
                        x = tf.layers.dense(
                            x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                            kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                            kernel_initializer=agent_utils.get_mrsa_initializer(),
                            use_bias=not self.batch_norm
                        )
                        if self.batch_norm:
                            x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                            x = tf.nn.relu(x)
        return x
class ExpectationModelHierarchy:
    """Two-level expectation model.

    Level one (l1) is a convolutional Gaussian encoder producing a continuous
    embedding; level two (l2) is an MLP on top of the l1 embedding producing a
    softmax over discrete blocks.  Each level has its own per-action reward
    and transition models and its own optimizers.

    Fix: the "next" l2 encodings are now computed from the NEXT-state l1
    embedding (l1_next_state_mu_t) rather than the current-state embedding;
    previously l1_next_state_mu_t was never consumed by the l2 level and the
    l2 transition loss targeted a distribution of the current state.
    """
    L1_NAMESPACE = "l1"
    L1_TARGET_NAMESPACE = "target_l1"
    L2_NAMESPACE = "l2"
    L2_TARGET_NAMESPACE = "target_l2"
    def __init__(self, input_shape, l1_num_blocks, l2_num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, l1_learning_rate_encoder, l2_learning_rate_encoder, learning_rate_r, learning_rate_t,
                 weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, l2_hiddens, batch_norm=True,
                 target_network=True, propagate_next_state=False, no_sample=False):
        """Store hyperparameters; the TF graph is built later by build()."""
        if propagate_next_state:
            # propagating gradients through next-state encodings is
            # incompatible with a frozen target network
            assert not target_network
        self.input_shape = input_shape
        self.l1_num_blocks = l1_num_blocks
        self.l2_num_blocks = l2_num_blocks
        self.num_actions = num_actions
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.l1_learning_rate_encoder = l1_learning_rate_encoder
        self.l2_learning_rate_encoder = l2_learning_rate_encoder
        self.learning_rate_r = learning_rate_r
        self.learning_rate_t = learning_rate_t
        self.weight_decay = weight_decay
        self.gamma = gamma
        self.optimizer_encoder = optimizer_encoder
        self.optimizer_model = optimizer_model
        self.max_steps = max_steps
        self.l2_hiddens = l2_hiddens
        self.batch_norm = batch_norm
        self.target_network = target_network
        self.propagate_next_state = propagate_next_state
        self.no_sample = no_sample
        self.hand_states_pl, self.next_hand_states_pl = None, None
    def encode(self, depths, hand_states, level, batch_size=100):
        """Encode observations at the given level ("l1" mean or "l2" softmax)."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        embeddings = []
        if level == self.L1_NAMESPACE:
            to_run = self.l1_state_mu_t
        else:
            to_run = self.l2_softmax_t
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[batch_slice],
                self.is_training_pl: False
            }
            embedding = self.session.run(to_run, feed_dict=feed_dict)
            embeddings.append(embedding)
        embeddings = np.concatenate(embeddings, axis=0)
        return embeddings
    def validate(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones, level,
                 batch_size=100):
        """Return per-sample [transition, reward] losses for one level, shape (N, 2)."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        if level == self.L1_NAMESPACE:
            to_run = [self.l1_full_transition_loss_t, self.l1_full_reward_loss_t]
        else:
            to_run = [self.l2_full_transition_loss_t, self.l2_full_reward_loss_t]
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            l1, l2 = self.session.run(to_run, feed_dict=feed_dict)
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
        losses = np.concatenate(losses, axis=0)
        return losses
    def validate_and_encode(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones,
                            level, batch_size=100):
        """Like validate(), but also return the level's embeddings."""
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []
        if level == self.L1_NAMESPACE:
            to_run = [self.l1_state_mu_t, self.l1_full_transition_loss_t, self.l1_full_reward_loss_t]
        else:
            to_run = [self.l2_softmax_t, self.l2_full_transition_loss_t, self.l2_full_reward_loss_t]
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_states_pl: next_depths[batch_slice],
                self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            tmp_embeddings, l1, l2 = self.session.run(to_run, feed_dict=feed_dict)
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)
        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)
        return losses, embeddings
    def build(self):
        """Construct placeholders, model variables and training ops."""
        self.build_placeholders()
        self.build_model()
        self.build_training()
    def build_placeholders(self):
        """Create all input placeholders."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
    def build_model(self):
        """Build both levels' encoders and their reward/transition models."""
        # level one
        self.l1_state_mu_t, self.l1_state_log_var_t, self.l1_state_sample_t = \
            self.build_l1(self.states_pl, self.hand_states_pl, namespace=self.L1_NAMESPACE)
        if self.target_network:
            self.l1_next_state_mu_t, self.l1_next_state_log_var_t, self.l1_next_state_sample_t = self.build_l1(
                self.next_states_pl, self.next_hand_states_pl, share_weights=False,
                namespace=self.L1_TARGET_NAMESPACE
            )
        else:
            self.l1_next_state_mu_t, self.l1_next_state_log_var_t, self.l1_next_state_sample_t = self.build_l1(
                self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.L1_NAMESPACE
            )
        self.l1_r_v = tf.get_variable(
            "reward_matrix_l1", shape=(self.num_actions, self.l1_num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.l1_num_blocks), dtype=tf.float32)
        )
        self.l1_r_t = self.l1_r_v
        self.l1_t_v = tf.get_variable(
            "transition_matrix_l1", shape=(self.num_actions, self.l1_num_blocks, self.l1_num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.l1_num_blocks), dtype=tf.float32)
        )
        self.l1_t_t = self.l1_t_v
        # level two
        self.l2_logits_t, self.l2_softmax_t = self.build_l2(self.l1_state_mu_t, self.L2_NAMESPACE)
        if self.target_network:
            # BUG FIX: the next-state l2 encoding must come from the
            # next-state l1 embedding, not the current one.
            self.l2_next_logits_t, self.l2_next_softmax_t = self.build_l2(
                self.l1_next_state_mu_t, namespace=self.L2_TARGET_NAMESPACE, share_weights=False
            )
        else:
            self.l2_next_logits_t, self.l2_next_softmax_t = self.build_l2(
                self.l1_next_state_mu_t, namespace=self.L2_NAMESPACE, share_weights=True
            )
        self.l2_r_v = tf.get_variable(
            "reward_matrix_l2", shape=(self.num_actions, self.l2_num_blocks), dtype=tf.float32,
            initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        )
        self.l2_r_t = self.l2_r_v
        self.l2_t_v = tf.get_variable(
            "transition_matrix_l2", shape=(self.num_actions, self.l2_num_blocks, self.l2_num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.l2_num_blocks), dtype=tf.float32)
        )
        # l2 transitions are row-stochastic (softmax over the target block)
        self.l2_t_t = tf.nn.softmax(self.l2_t_v, axis=-1)
        # build target updates
        if self.target_network:
            self.build_target_update()
    def build_training(self):
        """Build per-level losses and optimizer steps."""
        # create global training step variable
        self.global_step = tf.train.get_or_create_global_step()
        # gather appropriate transition matrices
        l1_r_per_action_t = tf.gather(self.l1_r_t, self.actions_pl)
        l2_r_per_action_t = tf.gather(self.l2_r_t, self.actions_pl)
        l1_t_per_action_t = tf.gather(self.l1_t_t, self.actions_pl)
        l2_t_per_action_t = tf.gather(tf.nn.log_softmax(self.l2_t_v), self.actions_pl)
        # reward losses: l1 is a squared error on the linear prediction,
        # l2 is an expectation of squared errors under the block softmax
        dones_t = tf.cast(self.dones_pl, tf.float32)
        self.l1_full_reward_loss_t = (1 / 2) * tf.square(
            self.rewards_pl - tf.reduce_sum(self.l1_state_sample_t * l1_r_per_action_t, axis=1)
        )
        self.l1_reward_loss_t = tf.reduce_mean(self.l1_full_reward_loss_t, axis=0)
        self.l2_full_reward_loss_t = (1 / 2) * tf.reduce_sum(
            tf.square(self.rewards_pl[:, tf.newaxis] - l2_r_per_action_t) * self.l2_softmax_t, axis=1
        )
        self.l2_reward_loss_t = tf.reduce_mean(self.l2_full_reward_loss_t, axis=0)
        # transition losses: l1 squared error in latent space, l2 expected
        # log-likelihood of the next block; both masked on terminal steps
        l1_transformed_logits = tf.matmul(self.l1_state_sample_t[:, tf.newaxis, :], l1_t_per_action_t)
        l1_transformed_logits = l1_transformed_logits[:, 0, :]
        self.l1_full_transition_loss_t = (1 / 2) * tf.reduce_sum(
            tf.square(tf.stop_gradient(self.l1_next_state_sample_t) - l1_transformed_logits), axis=1
        ) * (1 - dones_t)
        self.l1_transition_loss_t = tf.reduce_sum(self.l1_full_transition_loss_t, axis=0) / tf.reduce_max([1.0, tf.reduce_sum(1 - dones_t)])
        term1 = self.l2_softmax_t[:, :, tf.newaxis] * tf.stop_gradient(self.l2_next_softmax_t[:, tf.newaxis, :]) * \
            l2_t_per_action_t
        self.l2_full_transition_loss_t = - tf.reduce_sum(term1, axis=[1, 2]) * (1 - dones_t)
        self.l2_transition_loss_t = tf.reduce_sum(self.l2_full_transition_loss_t, axis=0) / tf.reduce_max([1.0, tf.reduce_sum(1 - dones_t)])
        # regularization
        l1_reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.L1_NAMESPACE)
        l2_reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.L2_NAMESPACE)
        self.l1_regularization_loss_t = 0
        self.l2_regularization_loss_t = 0
        if len(l1_reg) > 0:
            self.l1_regularization_loss_t = tf.add_n(l1_reg)
        if len(l2_reg) > 0:
            self.l2_regularization_loss_t = tf.add_n(l2_reg)
        # full loss; gamma_v can be rescheduled at run time (set_gamma)
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False, dtype=tf.float32)
        self.l1_loss_t = self.l1_reward_loss_t + \
            tf.stop_gradient(self.gamma_v) * self.l1_transition_loss_t + \
            self.l1_regularization_loss_t
        self.l2_loss_t = self.l2_reward_loss_t + \
            tf.stop_gradient(self.gamma_v) * self.l2_transition_loss_t + \
            self.l2_regularization_loss_t
        # optimizers: encoders and model matrices are trained separately
        l1_encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.L1_NAMESPACE)
        l2_encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.L2_NAMESPACE)
        l1_encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.l1_learning_rate_encoder)
        l2_encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.l2_learning_rate_encoder)
        self.l1_encoder_train_step = l1_encoder_optimizer.minimize(
            self.l1_loss_t, global_step=self.global_step, var_list=l1_encoder_variables
        )
        self.l2_encoder_train_step = l2_encoder_optimizer.minimize(
            self.l2_loss_t, global_step=self.global_step, var_list=l2_encoder_variables
        )
        if self.batch_norm:
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.l1_encoder_train_step = tf.group(self.l1_encoder_train_step, self.update_op)
        l1_r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
        l2_r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
        self.l1_r_step = l1_r_optimizer.minimize(
            self.l1_reward_loss_t, var_list=[self.l1_r_v]
        )
        self.l2_r_step = l2_r_optimizer.minimize(
            self.l2_reward_loss_t, var_list=[self.l2_r_v]
        )
        l1_t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
        l2_t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
        self.l1_t_step = l1_t_optimizer.minimize(
            self.l1_transition_loss_t, var_list=[self.l1_t_v]
        )
        self.l2_t_step = l2_t_optimizer.minimize(
            self.l2_transition_loss_t, var_list=[self.l2_t_v]
        )
        self.l1_model_train_step = tf.group(self.l1_r_step, self.l1_t_step)
        self.l2_model_train_step = tf.group(self.l2_r_step, self.l2_t_step)
        self.l1_train_step = tf.group(self.l1_encoder_train_step, self.l1_model_train_step)
        self.l2_train_step = tf.group(self.l2_encoder_train_step, self.l2_model_train_step)
    def build_l1(self, depth_pl, hand_state_pl, share_weights=False, namespace=L1_NAMESPACE):
        """Convolutional Gaussian encoder; returns (mu, log_var, sample)."""
        x = tf.expand_dims(depth_pl, axis=-1)
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    # the final conv's batch norm is applied after flattening
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            # append the binary hand state to the conv features
            x = tf.concat([x, hand_state_pl], axis=1)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                mu = tf.layers.dense(
                    x, self.l1_num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
                log_var = tf.layers.dense(
                    x, self.l1_num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        if self.no_sample:
            sample = mu
        else:
            # reparameterization trick: sample = mu + sd * noise
            noise = tf.random_normal(
                shape=(tf.shape(mu)[0], self.l1_num_blocks), mean=0, stddev=1.0
            )
            var_t = tf.exp(log_var)
            sd = tf.sqrt(var_t)
            sd_noise_t = noise * sd
            sample = mu + sd_noise_t
        return mu, log_var, sample
    def build_l2(self, l1_output, namespace, share_weights=False):
        """MLP from an l1 embedding to (logits, softmax) over l2 blocks."""
        x = l1_output
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx, neurons in enumerate(self.l2_hiddens):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer()
                    )
            l2_logits = tf.layers.dense(
                x, self.l2_num_blocks, activation=None,
                kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                kernel_initializer=agent_utils.get_mrsa_initializer()
            )
            l2_softmax = tf.nn.softmax(l2_logits)
        return l2_logits, l2_softmax
    def build_target_update(self):
        """Create an op that hard-copies online encoder weights to the targets."""
        ops = []
        for source, target in zip([self.L1_NAMESPACE, self.L2_NAMESPACE],
                                  [self.L1_TARGET_NAMESPACE, self.L2_TARGET_NAMESPACE]):
            source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=source)
            target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=target)
            # relies on both scopes creating variables in the same order
            assert len(source_vars) == len(target_vars) and len(source_vars) > 0
            for source_var, target_var in zip(source_vars, target_vars):
                ops.append(tf.assign(target_var, source_var))
        self.target_update_op = tf.group(*ops)
    def set_gamma(self, value):
        """Reschedule the transition-loss weight at run time."""
        self.session.run(tf.assign(self.gamma_v, value))
    def start_session(self, gpu_memory=None):
        """Open a TF session (optionally capping GPU memory) and init variables."""
        gpu_options = None
        if gpu_memory is not None:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        self.session = tf.Session(config=tf_config)
        self.session.run(tf.global_variables_initializer())
    def stop_session(self):
        """Close the TF session if one is open."""
        if self.session is not None:
            self.session.close()
class RobsTwoStepModel:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
                 tau=1.0, stop_step_two_reward_gradients=False, normal_t_init=False):
        """Store hyperparameters; the TF graph is built later by build().

        tau: softmax temperature of the encoder output.
        stop_step_two_reward_gradients: if True, the two-step reward loss
            does not back-propagate into the reward matrix.
        normal_t_init: use a normal (instead of uniform) initializer for the
            transition tensor.
        """
        self.input_shape = input_shape
        self.num_blocks = num_blocks
        self.num_actions = num_actions
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.learning_rate_encoder = learning_rate_encoder
        self.learning_rate_r = learning_rate_r
        self.learning_rate_t = learning_rate_t
        self.weight_decay = weight_decay
        self.gamma = gamma
        self.optimizer_encoder = optimizer_encoder
        self.optimizer_model = optimizer_model
        self.max_steps = max_steps
        self.batch_norm = batch_norm
        self.tau = tau
        self.stop_step_two_reward_gradients = stop_step_two_reward_gradients
        self.normal_t_init = normal_t_init
        self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, depths, hand_states, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.is_training_pl: False
}
embedding = self.session.run(self.state_softmax_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def validate(self, depths, hand_states, actions, rewards, next_actions, next_rewards, dones, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_actions_pl: next_actions[batch_slice],
self.next_rewards_pl: next_rewards[batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2 = self.session.run(
[self.full_step_one_reward_loss_t, self.full_step_two_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
    def validate_and_encode(self, depths, hand_states, actions, rewards, next_actions, next_rewards, dones,
                            batch_size=100):
        """Like validate(), but also return the softmax state encodings.

        Returns (losses, embeddings): losses is (N, 2) with the step-one and
        step-two reward losses per sample; embeddings is the concatenated
        state softmax output.
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                # hand states are fed as a (batch, 1) column
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_actions_pl: next_actions[batch_slice],
                self.next_rewards_pl: next_rewards[batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            tmp_embeddings, l1, l2 = self.session.run([
                self.state_softmax_t, self.full_step_one_reward_loss_t, self.full_step_two_reward_loss_t],
                feed_dict=feed_dict
            )
            # stack the two per-sample loss vectors into (batch, 2)
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)
        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)
        return losses, embeddings
    def build(self):
        """Construct placeholders, model variables and training ops, in order."""
        self.build_placeholders()
        self.build_model()
        self.build_training()
    def build_placeholders(self):
        """Create all input placeholders for states, actions, rewards and flags."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_actions_pl = tf.placeholder(tf.int32, shape=(None,), name="next_actions_pl")
        self.next_rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="next_rewards_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
    def build_model(self):
        """Build the encoder, a perplexity diagnostic and the reward/transition models."""
        self.state_logits_t, self.state_softmax_t = self.build_encoder(self.states_pl, self.hand_states_pl)
        # perplexity of the block softmax: 2 ** (mean entropy in bits);
        # measures how many blocks the encoder effectively uses
        self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
            - tf.reduce_mean(
                tf.reduce_sum(
                    self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
                    tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
                    axis=1
                ),
                axis=0
            )
        )
        # per-action expected reward for each block
        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        )
        self.r_t = self.r_v
        if self.normal_t_init:
            init = tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks))
        else:
            init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=init
        )
        # transitions are row-stochastic (softmax over the target block)
        self.t_t = tf.nn.softmax(self.t_v, axis=-1)
    def build_training(self):
        """Build the one- and two-step reward losses and the optimizer steps."""
        # create global training step variable
        self.global_step = tf.train.get_or_create_global_step()
        # gather appropriate transition matrices
        r_per_action_t = tf.gather(self.r_t, self.actions_pl)
        t_per_action_t = tf.gather(self.t_t, self.actions_pl)
        next_r_per_action_t = tf.gather(self.r_t, self.next_actions_pl)
        if self.stop_step_two_reward_gradients:
            # the two-step loss then trains only the encoder and transitions
            next_r_per_action_t = tf.stop_gradient(next_r_per_action_t)
        # one step reward loss
        self.full_step_one_reward_loss_t, self.step_one_reward_loss_t = \
            self.build_reward_loss(r_per_action_t, self.rewards_pl, self.state_softmax_t)
        # two step reward loss: propagate the state distribution through the
        # per-action transition matrix, then score the next reward
        self.transformed_next_state_softmax_t = \
            tf.matmul(self.state_softmax_t[:, tf.newaxis, :], t_per_action_t)[:, 0, :]
        self.full_step_two_reward_loss_t, self.step_two_reward_loss_t = \
            self.build_reward_loss(
                next_r_per_action_t, self.next_rewards_pl, self.transformed_next_state_softmax_t, dones=self.dones_pl
            )
        # regularization
        reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if len(reg) > 0:
            self.regularization_loss_t = tf.add_n(reg)
        else:
            self.regularization_loss_t = 0
        # full loss; gamma_v can be rescheduled at run time, hence stop_gradient
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False, dtype=tf.float32)
        self.loss_t = self.step_one_reward_loss_t + tf.stop_gradient(self.gamma_v) * self.step_two_reward_loss_t + \
            self.regularization_loss_t
        # optimizers: encoder and model matrices are trained by separate steps
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
        self.encoder_train_step = encoder_optimizer.minimize(
            self.loss_t, global_step=self.global_step, var_list=encoder_variables
        )
        if self.batch_norm:
            # batch-norm statistics must be updated alongside the encoder step
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
        model_train_step = []
        r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
        self.r_step = r_optimizer.minimize(
            self.loss_t, var_list=[self.r_v]
        )
        model_train_step.append(self.r_step)
        t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
        self.t_step = t_optimizer.minimize(
            self.loss_t, var_list=[self.t_v]
        )
        model_train_step.append(self.t_step)
        self.model_train_step = tf.group(*model_train_step)
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
    @staticmethod
    def build_reward_loss(expected_rewards, observed_rewards, encodings, dones=None):
        """Expected squared reward error under the block distribution.

        Returns (full_reward_loss_t, reward_loss_t): the per-sample losses and
        their mean. When dones is given, terminal samples are masked out and
        the mean is taken over non-terminal samples only.
        """
        # compute the reward loss
        term1_t = tf.square(observed_rewards[:, tf.newaxis] - expected_rewards)
        term2_t = term1_t * encodings
        full_reward_loss_t = (1 / 2) * tf.reduce_sum(term2_t, axis=1)
        if dones is not None:
            # NOTE: 'dones' is reassigned to the keep-mask (1 for non-terminal)
            dones = (1.0 - tf.cast(dones, dtype=tf.float32))
            full_reward_loss_t = full_reward_loss_t * dones
            # average over non-terminal samples; guard against an all-terminal batch
            reward_loss_t = tf.reduce_sum(full_reward_loss_t) / tf.reduce_max([tf.reduce_sum(dones), 1.0])
        else:
            reward_loss_t = tf.reduce_mean(full_reward_loss_t)
        return full_reward_loss_t, reward_loss_t
    def build_encoder(self, depth_pl, hand_state_pl, share_weights=False, namespace=ENCODER_NAMESPACE):
        """Build the conv/fc encoder mapping a depth image + hand state to block logits.

        :param depth_pl: 2D depth-image placeholder (a channel axis is added here).
        :param hand_state_pl: hand-state placeholder, concatenated after the conv stack.
        :param share_weights: reuse variables of an already-built encoder scope.
        :param namespace: variable scope name for the encoder.
        :return: (logits over blocks, softmax distribution tempered by self.tau).
        """
        x = tf.expand_dims(depth_pl, axis=-1)
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    # when batch norm is on, conv is linear (no bias) and BN+ReLU follow
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    # the last conv's BN+ReLU is applied after flattening instead
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            # inject the hand state only after the convolutional features
            x = tf.concat([x, hand_state_pl], axis=1)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                x = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        # temperature-scaled softmax over the latent blocks
        dist = tf.nn.softmax(x / self.tau)
        return x, dist
    def build_summaries(self):
        """Register TensorBoard summaries: losses, logits/softmax stats, gradient norms."""
        # losses
        tf.summary.scalar("loss", self.loss_t)
        tf.summary.scalar("step_one_reward_loss", tf.reduce_mean(self.step_one_reward_loss_t))
        tf.summary.scalar("step_two_reward_loss", tf.reduce_mean(self.step_two_reward_loss_t))
        # logits and softmax
        agent_utils.summarize(self.state_logits_t, "logits")
        agent_utils.summarize(self.state_softmax_t, "softmax")
        # grad norms
        self.build_gradient_norm_summary(self.loss_t, self.state_logits_t, "state_logits_grad_norm")
        self.build_gradient_norm_summary(self.loss_t, self.state_softmax_t, "state_softmax_grad_norm")
        self.build_gradient_norm_summary(self.loss_t, self.r_v, "r_grad_norm", matrix_grad=True)
        self.build_gradient_norm_summary(self.loss_t, self.t_v, "t_grad_norm", matrix_grad=True)
    def build_gradient_norm_summary(self, y, x, name, matrix_grad=False):
        """Summarize the L2 norm of dy/dx for the first batch element.

        :param y: scalar tensor to differentiate.
        :param x: tensor or variable to differentiate with respect to.
        :param name: summary name.
        :param matrix_grad: if True, unwrap the single-element list that
            tf.gradients returns before taking the norm (used for r_v / t_v).
        """
        # TODO: different dimensions of r_t and t_t
        grad_t = tf.gradients(y, x)
        # when matrix_grad is False, tf.norm converts the 1-element list to a
        # tensor with a leading axis, and [0] below strips it again
        if matrix_grad:
            grad_t = grad_t[0]
        norm_t = tf.norm(grad_t, ord=2, axis=-1)[0]
        agent_utils.summarize(norm_t, name)
    def set_gamma(self, value):
        """Assign a new value to the non-trainable discount variable gamma_v."""
        self.session.run(tf.assign(self.gamma_v, value))
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
    def stop_session(self):
        """Close the TF session if one is open."""
        # NOTE(review): self.session appears to be created only in
        # start_session(); calling this first would raise AttributeError —
        # verify callers always start a session before stopping it.
        if self.session is not None:
            self.session.close()
def save_matrices_as_images(self, step, save_dir, ext="pdf"):
r, p = self.session.run([self.r_t, self.t_t])
r = np.reshape(r, (r.shape[0], -1))
p = np.reshape(p, (p.shape[0], -1))
r_path = os.path.join(save_dir, "r_{:d}.{}".format(step, ext))
p_path = os.path.join(save_dir, "p_{:d}.{}".format(step, ext))
plt.clf()
plt.imshow(r, vmin=-0.5, vmax=1.5)
plt.colorbar()
plt.savefig(r_path)
plt.clf()
plt.imshow(p, vmin=0, vmax=1)
plt.colorbar()
plt.savefig(p_path)
class RobsTwoStepModelContinuous:
    """Two-step reward model with a continuous (Gaussian) latent state.

    A convolutional encoder maps a depth image plus a hand state to a
    Gaussian latent embedding (mu, log_var, sample).  Per-action reward
    vectors (r_v) and transition matrices (t_v) are learned jointly so that
    one-step and discounted two-step rewards can be predicted from the
    latent state.
    """

    ENCODER_NAMESPACE = "encoder"
    TARGET_ENCODER_NAMESPACE = "target_encoder"

    def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
                 encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
                 weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
                 stop_step_two_reward_gradients=False, model_init_std=0.1, no_sample=False):
        """Store hyper-parameters; no TF graph is built until build() is called."""
        self.input_shape = input_shape
        self.num_blocks = num_blocks
        self.num_actions = num_actions
        self.encoder_filters = encoder_filters
        self.encoder_filter_sizes = encoder_filter_sizes
        self.encoder_strides = encoder_strides
        self.encoder_neurons = encoder_neurons
        self.learning_rate_encoder = learning_rate_encoder
        self.learning_rate_r = learning_rate_r
        self.learning_rate_t = learning_rate_t
        self.weight_decay = weight_decay
        self.gamma = gamma
        self.optimizer_encoder = optimizer_encoder
        self.optimizer_model = optimizer_model
        self.max_steps = max_steps
        self.batch_norm = batch_norm
        self.stop_step_two_reward_gradients = stop_step_two_reward_gradients
        self.model_init_std = model_init_std
        self.no_sample = no_sample
        self.hand_states_pl, self.next_hand_states_pl = None, None
        # fix: initialize the session handle so stop_session() is safe to call
        # even before start_session() (previously raised AttributeError)
        self.session = None

    def encode(self, depths, hand_states, batch_size=100):
        """Encode observations to latent means (mu) in mini-batches.

        :return: array of shape (num_samples, num_blocks).
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[batch_slice],
                self.is_training_pl: False
            }
            # TODO: I could sample
            embedding = self.session.run(self.mu_t, feed_dict=feed_dict)
            embeddings.append(embedding)
        embeddings = np.concatenate(embeddings, axis=0)
        return embeddings

    def validate(self, depths, hand_states, actions, rewards, next_actions, next_rewards, dones, batch_size=100):
        """Compute per-sample one-step and two-step reward losses.

        :return: array of shape (num_samples, 2): [step-one loss, step-two loss].
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                # NOTE(review): unlike encode(), the hand states get an extra
                # axis here; presumably callers pass a flat vector — confirm
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_actions_pl: next_actions[batch_slice],
                self.next_rewards_pl: next_rewards[batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            l1, l2 = self.session.run(
                [self.full_step_one_reward_loss_t, self.full_step_two_reward_loss_t], feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
        losses = np.concatenate(losses, axis=0)
        return losses

    def validate_and_encode(self, depths, hand_states, actions, rewards, next_actions, next_rewards, dones,
                            batch_size=100):
        """Like validate(), but also returns the latent means for each sample.

        :return: (losses of shape (num_samples, 2), embeddings of shape (num_samples, num_blocks)).
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_actions_pl: next_actions[batch_slice],
                self.next_rewards_pl: next_rewards[batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            tmp_embeddings, l1, l2 = self.session.run([
                self.mu_t, self.full_step_one_reward_loss_t, self.full_step_two_reward_loss_t],
                feed_dict=feed_dict
            )
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)
        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)
        return losses, embeddings

    def build(self):
        """Construct the full TF graph: placeholders, model, training ops."""
        self.build_placeholders()
        self.build_model()
        self.build_training()

    def build_placeholders(self):
        """Create input placeholders for states, actions, rewards and flags."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_actions_pl = tf.placeholder(tf.int32, shape=(None,), name="next_actions_pl")
        self.next_rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="next_rewards_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")

    def build_model(self):
        """Build the encoder plus the per-action reward and transition parameters."""
        self.mu_t, self.log_var_t, self.sample_t = self.build_encoder(self.states_pl, self.hand_states_pl)
        self.r_v = tf.get_variable(
            "reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
        self.r_t = self.r_v
        self.t_v = tf.get_variable(
            "transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
            initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
        )
        self.t_t = self.t_v
        #self.t_t = tf.nn.softmax(self.t_v, axis=-1)

    def build_training(self):
        """Build losses and optimizer train steps for encoder, r and t."""
        # create global training step variable
        self.global_step = tf.train.get_or_create_global_step()
        # gather appropriate transition matrices
        r_per_action_t = tf.gather(self.r_t, self.actions_pl)
        t_per_action_t = tf.gather(self.t_t, self.actions_pl)
        next_r_per_action_t = tf.gather(self.r_t, self.next_actions_pl)
        if self.stop_step_two_reward_gradients:
            next_r_per_action_t = tf.stop_gradient(next_r_per_action_t)
        # one step reward loss
        self.full_step_one_reward_loss_t, self.step_one_reward_loss_t = \
            self.build_reward_loss(r_per_action_t, self.rewards_pl, self.sample_t)
        # two step reward loss: propagate the sample through the per-action transition
        self.transformed_next_state_sample_t = \
            tf.matmul(self.sample_t[:, tf.newaxis, :], t_per_action_t)[:, 0, :]
        self.full_step_two_reward_loss_t, self.step_two_reward_loss_t = \
            self.build_reward_loss(
                next_r_per_action_t, self.next_rewards_pl, self.transformed_next_state_sample_t, dones=self.dones_pl
            )
        # regularization
        reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if len(reg) > 0:
            self.regularization_loss_t = tf.add_n(reg)
        else:
            self.regularization_loss_t = 0
        # full loss; gamma is a variable so it can be annealed via set_gamma()
        self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False, dtype=tf.float32)
        self.loss_t = self.step_one_reward_loss_t + tf.stop_gradient(self.gamma_v) * self.step_two_reward_loss_t + \
            self.regularization_loss_t
        # optimizers: separate learning rates for the encoder and for r / t
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
        self.encoder_train_step = encoder_optimizer.minimize(
            self.loss_t, global_step=self.global_step, var_list=encoder_variables
        )
        if self.batch_norm:
            self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
        model_train_step = []
        r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
        self.r_step = r_optimizer.minimize(
            self.loss_t, var_list=[self.r_v]
        )
        model_train_step.append(self.r_step)
        t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
        self.t_step = t_optimizer.minimize(
            self.loss_t, var_list=[self.t_v]
        )
        model_train_step.append(self.t_step)
        self.model_train_step = tf.group(*model_train_step)
        self.train_step = tf.group(self.encoder_train_step, self.model_train_step)

    @staticmethod
    def build_reward_loss(expected_rewards, observed_rewards, encodings, dones=None):
        """Squared-error loss between observed rewards and the encoding-weighted prediction.

        :param dones: optional termination flags; terminal samples are masked out
            and the mean is taken over non-terminal samples only.
        :return: (per-sample loss tensor, scalar mean loss tensor).
        """
        term1_t = tf.square(
            observed_rewards - tf.reduce_sum(encodings * expected_rewards, axis=1)
        )
        full_reward_loss_t = (1 / 2) * term1_t
        if dones is not None:
            dones = (1.0 - tf.cast(dones, dtype=tf.float32))
            full_reward_loss_t = full_reward_loss_t * dones
            # guard against an all-terminal batch when averaging
            reward_loss_t = tf.reduce_sum(full_reward_loss_t) / tf.reduce_max([tf.reduce_sum(dones), 1.0])
        else:
            reward_loss_t = tf.reduce_mean(full_reward_loss_t)
        return full_reward_loss_t, reward_loss_t

    def build_encoder(self, depth_pl, hand_state_pl, share_weights=False, namespace=ENCODER_NAMESPACE):
        """Build the conv/fc encoder producing a Gaussian latent (mu, log_var, sample).

        :param share_weights: reuse variables of an already-built encoder scope.
        :return: (mu, log_var, sample); sample == mu when self.no_sample is True.
        """
        x = tf.expand_dims(depth_pl, axis=-1)
        with tf.variable_scope(namespace, reuse=share_weights):
            for idx in range(len(self.encoder_filters)):
                with tf.variable_scope("conv{:d}".format(idx + 1)):
                    # with batch norm, the conv is linear (no bias); BN+ReLU follow
                    x = tf.layers.conv2d(
                        x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
                        padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    # the last conv's BN+ReLU is applied after flattening instead
                    if self.batch_norm and idx != len(self.encoder_filters) - 1:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            x = tf.layers.flatten(x)
            if self.batch_norm:
                x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                x = tf.nn.relu(x)
            # inject the hand state only after the convolutional features
            x = tf.concat([x, hand_state_pl], axis=1)
            for idx, neurons in enumerate(self.encoder_neurons):
                with tf.variable_scope("fc{:d}".format(idx + 1)):
                    x = tf.layers.dense(
                        x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
                        kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                        kernel_initializer=agent_utils.get_mrsa_initializer(),
                        use_bias=not self.batch_norm
                    )
                    if self.batch_norm:
                        x = tf.layers.batch_normalization(x, training=self.is_training_pl)
                        x = tf.nn.relu(x)
            with tf.variable_scope("predict"):
                mu = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
                log_var = tf.layers.dense(
                    x, self.num_blocks, activation=None,
                    kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
                    kernel_initializer=agent_utils.get_mrsa_initializer()
                )
        if self.no_sample:
            sample = mu
        else:
            # reparameterization trick: sample = mu + N(0, 1) * exp(log_var / 2)
            noise = tf.random_normal(
                shape=(tf.shape(mu)[0], self.num_blocks), mean=0, stddev=1.0
            )
            var_t = tf.exp(log_var)
            sd = tf.sqrt(var_t)
            sd_noise_t = noise * sd
            sample = mu + sd_noise_t
        return mu, log_var, sample

    def build_summaries(self):
        """Register TensorBoard summaries: losses, latent stats, gradient norms."""
        # losses
        tf.summary.scalar("loss", self.loss_t)
        tf.summary.scalar("step_one_reward_loss", tf.reduce_mean(self.step_one_reward_loss_t))
        tf.summary.scalar("step_two_reward_loss", tf.reduce_mean(self.step_two_reward_loss_t))
        # means and variances
        agent_utils.summarize(self.mu_t, "means")
        agent_utils.summarize(self.log_var_t, "log_variances")
        # grad norms
        self.build_gradient_norm_summary(self.loss_t, self.mu_t, "means_grad_norm")
        self.build_gradient_norm_summary(self.loss_t, self.log_var_t, "log_variances_grad_norm")
        self.build_gradient_norm_summary(self.loss_t, self.r_v, "r_grad_norm", matrix_grad=True)
        self.build_gradient_norm_summary(self.loss_t, self.t_v, "t_grad_norm", matrix_grad=True)

    def build_gradient_norm_summary(self, y, x, name, matrix_grad=False):
        """Summarize the L2 norm of dy/dx for the first batch element.

        :param matrix_grad: if True, unwrap the single-element list that
            tf.gradients returns before taking the norm (used for r_v / t_v).
        """
        # TODO: different dimensions of r_t and t_t
        grad_t = tf.gradients(y, x)
        if matrix_grad:
            grad_t = grad_t[0]
        norm_t = tf.norm(grad_t, ord=2, axis=-1)[0]
        agent_utils.summarize(norm_t, name)

    def set_gamma(self, value):
        """Assign a new value to the non-trainable discount variable gamma_v."""
        self.session.run(tf.assign(self.gamma_v, value))

    def start_session(self, gpu_memory=None):
        """Create a TF session and initialize all global variables.

        :param gpu_memory: optional fraction of GPU memory this process may use.
        """
        gpu_options = None
        if gpu_memory is not None:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        self.session = tf.Session(config=tf_config)
        self.session.run(tf.global_variables_initializer())

    def stop_session(self):
        """Close the TF session if one is open; safe to call multiple times."""
        if self.session is not None:
            self.session.close()

    def save_matrices_as_images(self, step, save_dir, ext="pdf"):
        """Render the reward and transition matrices as images on disk."""
        r, p = self.session.run([self.r_t, self.t_t])
        r = np.reshape(r, (r.shape[0], -1))
        p = np.reshape(p, (p.shape[0], -1))
        r_path = os.path.join(save_dir, "r_{:d}.{}".format(step, ext))
        p_path = os.path.join(save_dir, "p_{:d}.{}".format(step, ext))
        plt.clf()
        plt.imshow(r, vmin=-0.5, vmax=1.5)
        plt.colorbar()
        plt.savefig(r_path)
        plt.clf()
        plt.imshow(p, vmin=0, vmax=1)
        plt.colorbar()
        plt.savefig(p_path)
class RobsTwoStepModelHierarchy:
L1_NAMESPACE = "l1"
L2_NAMESPACE = "l2"
def __init__(self, input_shape, l1_num_blocks, l2_num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, l1_learning_rate_encoder, l2_learning_rate_encoder,
learning_rate_r, learning_rate_t,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, l2_hiddens, batch_norm=True,
stop_step_two_reward_gradients=False, model_init_std=0.1, no_sample=False):
self.input_shape = input_shape
self.l1_num_blocks = l1_num_blocks
self.l2_num_blocks = l2_num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.l1_learning_rate_encoder = l1_learning_rate_encoder
self.l2_learning_rate_encoder = l2_learning_rate_encoder
self.learning_rate_r = learning_rate_r
self.learning_rate_t = learning_rate_t
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.l2_hiddens = l2_hiddens
self.batch_norm = batch_norm
self.stop_step_two_reward_gradients = stop_step_two_reward_gradients
self.model_init_std = model_init_std
self.no_sample = no_sample
self.hand_states_pl, self.next_hand_states_pl = None, None
    def encode(self, depths, hand_states, level, batch_size=100):
        """Encode observations at the requested hierarchy level, in mini-batches.

        :param level: self.L1_NAMESPACE for the continuous level-1 means,
            anything else for the level-2 softmax assignments.
        :return: array of shape (num_samples, num_blocks_of_that_level).
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[batch_slice],
                self.is_training_pl: False
            }
            if level == self.L1_NAMESPACE:
                to_run = self.l1_mu_t
            else:
                to_run = self.l2_softmax_t
            embedding = self.session.run(to_run, feed_dict=feed_dict)
            embeddings.append(embedding)
        embeddings = np.concatenate(embeddings, axis=0)
        return embeddings
    def validate(self, depths, hand_states, actions, rewards, next_actions, next_rewards, dones, level, batch_size=100):
        """Compute per-sample one-step and two-step reward losses for one level.

        :param level: self.L1_NAMESPACE or self.L2_NAMESPACE.
        :return: array of shape (num_samples, 2): [step-one loss, step-two loss].
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                # NOTE(review): unlike encode(), the hand states get an extra
                # axis here; presumably callers pass a flat vector — confirm
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_actions_pl: next_actions[batch_slice],
                self.next_rewards_pl: next_rewards[batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            if level == self.L1_NAMESPACE:
                to_run = [self.l1_full_step_one_reward_loss_t, self.l1_full_step_two_reward_loss_t]
            else:
                to_run = [self.l2_full_step_one_reward_loss_t, self.l2_full_step_two_reward_loss_t]
            l1, l2 = self.session.run(to_run, feed_dict=feed_dict)
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
        losses = np.concatenate(losses, axis=0)
        return losses
    def validate_and_encode(self, depths, hand_states, actions, rewards, next_actions, next_rewards, dones,
                            level, batch_size=100):
        """Like validate(), but also returns the embeddings of the chosen level.

        :return: (losses of shape (num_samples, 2), embeddings array).
        """
        num_steps = int(np.ceil(depths.shape[0] / batch_size))
        losses = []
        embeddings = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: depths[batch_slice],
                self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
                self.actions_pl: actions[batch_slice],
                self.rewards_pl: rewards[batch_slice],
                self.next_actions_pl: next_actions[batch_slice],
                self.next_rewards_pl: next_rewards[batch_slice],
                self.dones_pl: dones[batch_slice],
                self.is_training_pl: False
            }
            # level 1 exposes the continuous means, level 2 the soft assignments
            if level == self.L1_NAMESPACE:
                to_run = [self.l1_mu_t, self.l1_full_step_one_reward_loss_t, self.l1_full_step_two_reward_loss_t]
            else:
                to_run = [self.l2_softmax_t, self.l2_full_step_one_reward_loss_t, self.l2_full_step_two_reward_loss_t]
            tmp_embeddings, l1, l2 = self.session.run(to_run, feed_dict=feed_dict)
            losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
            embeddings.append(tmp_embeddings)
        losses = np.concatenate(losses, axis=0)
        embeddings = np.concatenate(embeddings)
        return losses, embeddings
    def build(self):
        """Construct the full TF graph: placeholders, model, then training ops."""
        self.build_placeholders()
        self.build_model()
        self.build_training()
    def build_placeholders(self):
        """Create input placeholders for states, hand states, actions, rewards and flags."""
        self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
        self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
        self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
        self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
        self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
        self.next_actions_pl = tf.placeholder(tf.int32, shape=(None,), name="next_actions_pl")
        self.next_rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="next_rewards_pl")
        self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
def build_model(self):
# build level one model
self.l1_mu_t, self.l1_log_var_t, self.l1_sample_t = self.build_l1(self.states_pl, self.hand_states_pl)
self.l1_r_v = tf.get_variable(
"reward_matrix_l1", shape=(self.num_actions, self.l1_num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.l1_num_blocks), dtype=tf.float32)
)
self.l1_r_t = self.l1_r_v
self.l1_t_v = tf.get_variable(
"transition_matrix_l1", shape=(self.num_actions, self.l1_num_blocks, self.l1_num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.l1_num_blocks), dtype=tf.float32)
)
self.l1_t_t = self.l1_t_v
# build level two model
self.l2_logits_t, self.l2_softmax_t = self.build_l2(self.l1_mu_t)
self.l2_r_v = tf.get_variable(
"reward_matrix_l2", shape=(self.num_actions, self.l2_num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
self.l2_r_t = self.l2_r_v
self.l2_t_v = tf.get_variable(
"transition_matrix_l2", shape=(self.num_actions, self.l2_num_blocks, self.l2_num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev= | np.sqrt(2 / self.l2_num_blocks) | numpy.sqrt |
"""
Streamline plotting for 2D vector fields.
"""
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.patches as patches
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
*density* : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
is divided into a 25x25 grid---*density* linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
Factor scale arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
"""
grid = Grid(x, y)
mask = StreamMask(density)
dmap = DomainMap(grid, mask)
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = next(axes._get_lines.color_cycle)
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
assert color.shape == grid.shape
line_colors = []
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
assert linewidth.shape == grid.shape
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
## Sanity checks.
assert u.shape == grid.shape
assert v.shape == grid.shape
if np.any(np.isnan(u)):
u = np.ma.array(u, mask=np.isnan(u))
if np.any(np.isnan(v)):
v = np.ma.array(v, mask=np.isnan(v))
integrate = get_integrator(u, v, dmap, minlength)
trajectories = []
for xm, ym in _gen_starting_points(mask.shape):
if mask[ym, xm] == 0:
xg, yg = dmap.mask2grid(xm, ym)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
if use_multicolor_lines:
if norm is None:
norm = mcolors.normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t in trajectories:
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx = np.array(t[0]) * grid.dx + grid.x_origin
ty = | np.array(t[1]) | numpy.array |
"""
@author: <NAME> (2017, Vrije Universiteit Brussel)
"""
import numpy as np
from collections import OrderedDict
import sys
sys.path.insert(0, '../')
from python_backend.user_jobs import TheoreticalUser
from python_backend import specs_jobs, utils_jobs
def get_random_job_matrix(min_num_jobs, seed, normalise=True):
""" Returns a matrix (num_jobs x num_objectives) of random job offers """
random_state = np.random.RandomState(seed)
for j in range(10):
num_rand_jobs = min_num_jobs*100
# initialise job matrix
job_matrix = | np.zeros((num_rand_jobs, specs_jobs.NUM_OBJECTIVES)) | numpy.zeros |
import os
import glob
import random
import torch
import imageio
import errno
import numpy as np
import tifffile as tiff
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.utils import data
from sklearn.metrics import confusion_matrix
# =============================================
class CustomDataset(torch.utils.data.Dataset):
    """Dataset of (image, label) pairs stored as paired .npy files.

    Image and label folders must each contain one ``.npy`` file per sample;
    files are paired by their sorted order, so matching file names are
    expected.  Samples are returned channel-first (C x H x W) as float32.
    """

    def __init__(self, imgs_folder, labels_folder, augmentation):
        # 1. Initialize file paths or a list of file names.
        self.imgs_folder = imgs_folder
        self.labels_folder = labels_folder
        self.data_augmentation = augmentation
        # fix: cache the sorted file lists once instead of re-globbing and
        # re-sorting both directories on every __getitem__/__len__ call
        self.all_images = sorted(glob.glob(os.path.join(self.imgs_folder, '*.npy')))
        self.all_labels = sorted(glob.glob(os.path.join(self.labels_folder, '*.npy')))

    def __getitem__(self, index):
        """Load, reorder to channel-first, augment and return one sample.

        :return: (image C x H x W float32, label 1-or-C x H x W float32, label file stem).
        """
        label = np.load(self.all_labels[index])
        label = np.array(label, dtype='float32')
        image = np.load(self.all_images[index])
        image = np.array(image, dtype='float32')
        labelname = os.path.splitext(os.path.basename(self.all_labels[index]))[0]
        # Reshape everything to channel x height x width.
        if label.ndim == 3:
            d1, d2, d3 = label.shape
            if d1 != min(d1, d2, d3):
                # NOTE(review): np.reshape only changes the shape without
                # moving axes — for genuine H x W x C data np.transpose would
                # be needed; kept as-is to preserve the original behaviour.
                label = np.reshape(label, (d3, d1, d2))
        elif label.ndim == 2:
            h, w = label.shape
            label = np.reshape(label, (1, h, w))
        d1, d2, d3 = image.shape
        if d1 != min(d1, d2, d3):
            # see NOTE above: reshape, not transpose, to match original code
            image = np.reshape(image, (d3, d1, d2))
        image, label = self._augment(image, label)
        return image, label, labelname

    def _augment(self, image, label):
        """Apply the configured random augmentation to one (image, label) pair."""
        if self.data_augmentation == 'full':
            draw = random.uniform(0, 1)
            if draw < 0.25:
                # flip both spatial axes of image and label together
                image = np.flip(image, axis=(1, 2)).copy()
                label = np.flip(label, axis=(1, 2)).copy()
            elif draw < 0.5:
                # additive gaussian noise on the image only
                noise = np.random.normal(0.0, 0.15, image.shape)
                # fix: clamp image + noise into [0, 1]; the original overwrote
                # out-of-range noise with the constant 1.0, which pushed
                # values further out of range instead of clamping them
                image = np.clip(image + noise, 0.0, 1.0).astype('float32')
            elif draw < 0.75:
                # random per-channel intensity scaling
                for channel in range(image.shape[0]):
                    image[channel, :, :] = image[channel, :, :] * random.uniform(0, 1)
        elif self.data_augmentation == 'flip':
            if random.uniform(0, 1) >= 0.5:
                # vertical flip of image and label
                image = np.flip(image, axis=1).copy()
                label = np.flip(label, axis=1).copy()
        elif self.data_augmentation == 'all_flip':
            if random.uniform(0, 1) >= 0.5:
                # flip both spatial axes of image and label
                image = np.flip(image, axis=(1, 2)).copy()
                label = np.flip(label, axis=(1, 2)).copy()
        return image, label

    def __len__(self):
        """Number of image files found at construction time."""
        return len(self.all_images)
# ============================================================================================
def evaluate_noisy_label(data, model1, model2, class_no):
    """Evaluate a segmentation network together with its noisy-label branch.

    Args:
        data: iterable of (images, over, under, wrong, true, name) batches.
        model1: network producing the clean segmentation logits.
        model2: network producing a list of noisy logits (one per annotator).
        class_no: number of segmentation classes.

    Returns:
        (mean dice over all batches, generalized energy distance of the
        LAST batch only — this matches the original behaviour).
    """
    model1.eval()
    model2.eval()
    dice_total = 0
    dice_running = []
    for batch_idx, batch in enumerate(data):
        images, labels_over, labels_under, labels_wrong, labels_true, _name = batch
        images = images.to(device='cuda', dtype=torch.float32)
        clean_logits = model1(images)
        noisy_logits_list = model2(images)
        seg_map = torch.max(clean_logits, dim=1)[1]
        # argmax each noisy head's logits to a hard segmentation map
        noisy_seg_maps = [
            torch.max(noisy_logits, dim=1)[1].cpu().detach().numpy()
            for noisy_logits in noisy_logits_list
        ]
        batch_dice = segmentation_scores(labels_true, seg_map.cpu().detach().numpy(), class_no)
        all_noisy_labels = [
            labels_over.cpu().detach().numpy(),
            labels_under.cpu().detach().numpy(),
            labels_wrong.cpu().detach().numpy(),
            labels_true.cpu().detach().numpy(),
        ]
        ged = generalized_energy_distance(all_noisy_labels, noisy_seg_maps, class_no)
        dice_total += batch_dice
        dice_running.append(dice_total)
    return dice_total / (batch_idx + 1), ged
def evaluate_noisy_label_2(data, model1, model2, class_no):
    """Evaluate a segmentation net plus a confusion-matrix noisy-label model.

    Args:
        data: iterable of (images, over, under, wrong, true, name) batches.
        model1: segmentation network producing clean logits.
        model2: network producing one confusion matrix (cm) per annotator.
        class_no: number of segmentation classes.

    Returns:
        (mean dice over all batches, generalized energy distance of the
        last batch only).
    """
    model1.eval()
    model2.eval()
    test_dice = 0
    test_dice_all = []
    for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_true, v_imagename) in enumerate(data):
        #
        v_images = v_images.to(device='cuda', dtype=torch.float32)
        v_outputs_logits = model1(v_images)
        b, c, h, w = v_outputs_logits.size()
        v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
        cms = model2(v_images)
        #
        _, v_output = torch.max(v_outputs_logits, dim=1)
        v_outputs_noisy = []
        #
        for cm in cms:
            #
            # normalise each per-pixel confusion matrix column-wise, then
            # apply it to the clean class probabilities to get noisy ones
            cm = cm.reshape(b * h * w, c, c)
            cm = cm / cm.sum(1, keepdim=True)
            v_noisy_logit = torch.bmm(cm, v_outputs_logits.reshape(b * h * w, c, 1)).reshape(b, c, h, w)
            _, v_noisy_output = torch.max(v_noisy_logit, dim=1)
            v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
        #
        v_dice_ = segmentation_scores(v_labels_true, v_output.cpu().detach().numpy(), class_no)
        #
        epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_true.cpu().detach().numpy()]
        v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
        test_dice += v_dice_
        test_dice_all.append(test_dice)
        #
        # print(i)
        # print(test_dice)
        # print(test_dice / (i + 1))
    # dice is averaged over batches; only the last batch's GED is returned
    return test_dice / (i + 1), v_ged
def evaluate_noisy_label_3(data, model1, class_no):
    """
    Evaluate a combined segmentation + confusion-matrix network on noisy labels.

    Args:
        data: iterable of (images, over-, under-, wrong-, good-labels, name) batches.
        model1: network returning (segmentation logits, list of pixel-wise confusion matrices).
        class_no: number of segmentation classes.

    Returns:
        Tuple of (mean dice over all batches, mean generalised energy distance).
        Previously the GED of only the *last* batch was returned.
    """
    model1.eval()
    test_dice = 0
    test_dice_all = []
    test_ged = 0
    with torch.no_grad():  # inference only, no gradients needed
        for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
            v_images = v_images.to(device='cuda', dtype=torch.float32)
            v_outputs_logits, cms = model1(v_images)
            b, c, h, w = v_outputs_logits.size()
            v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
            _, v_output = torch.max(v_outputs_logits, dim=1)
            v_outputs_noisy = []
            for cm in cms:
                # Normalise each pixel-wise confusion matrix over its first axis and
                # apply it to the clean class probabilities.
                # NOTE(review): unlike evaluate_noisy_label_4/5, this reshape does not
                # move the channel axis before flattening — verify against model1's
                # confusion-matrix layout.
                cm = cm.reshape(b * h * w, c, c)
                cm = cm / cm.sum(1, keepdim=True)
                v_noisy_output = torch.bmm(cm, v_outputs_logits.reshape(b * h * w, c, 1)).reshape(b, c, h, w)
                _, v_noisy_output = torch.max(v_noisy_output, dim=1)
                v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
            v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
            epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
            test_ged += generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
            test_dice += v_dice_
            test_dice_all.append(v_dice_)  # fixed: store the per-batch dice, not the running sum
    return test_dice / (i + 1), test_ged / (i + 1)
def evaluate_noisy_label_4(data, model1, class_no):
    """
    Evaluate a combined segmentation + confusion-matrix network on noisy labels,
    with confusion matrices stored channel-first and flattened per pixel.

    Args:
        data: iterable of (images, over-, under-, wrong-, good-labels, name) batches.
        model1: network returning (segmentation logits, list of pixel-wise confusion matrices).
        class_no: number of segmentation classes.

    Returns:
        Tuple of (mean dice over all batches, mean generalised energy distance).
        Previously the GED of only the *last* batch was returned.
    """
    model1.eval()
    test_dice = 0
    test_dice_all = []
    test_ged = 0
    with torch.no_grad():  # inference only, no gradients needed
        for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
            v_images = v_images.to(device='cuda', dtype=torch.float32)
            v_outputs_logits, cms = model1(v_images)
            b, c, h, w = v_outputs_logits.size()
            v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
            _, v_output = torch.max(v_outputs_logits, dim=1)
            v_outputs_noisy = []
            # Flatten probabilities to (b*h*w, c, 1) so each pixel's class vector
            # can be multiplied by its own confusion matrix with a single bmm.
            v_outputs_flat = v_outputs_logits.view(b, c, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, 1)
            for cm in cms:
                # (b, c*c, h, w) -> one (c, c) matrix per pixel, column-normalised.
                cm = cm.reshape(b, c ** 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, c)
                cm = cm / cm.sum(1, keepdim=True)
                v_noisy_output = torch.bmm(cm, v_outputs_flat).view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
                _, v_noisy_output = torch.max(v_noisy_output, dim=1)
                v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
            v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
            epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
            test_ged += generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
            test_dice += v_dice_
            test_dice_all.append(v_dice_)  # fixed: store the per-batch dice, not the running sum
    return test_dice / (i + 1), test_ged / (i + 1)
def evaluate_noisy_label_6(data, model1, class_no):
    """
    Evaluate a combined segmentation + confusion-matrix network whose confusion
    matrices are stored as two low-rank factors per pixel.

    Args:
        data: iterable of (images, over-, under-, wrong-, good-labels, name) batches.
        model1: network returning (segmentation logits, list of low-rank confusion factors).
        class_no: number of segmentation classes.

    Returns:
        Tuple of (mean dice over all batches, mean generalised energy distance).
        Previously the GED of only the *last* batch was returned.
    """
    model1.eval()
    test_dice = 0
    test_dice_all = []
    test_ged = 0
    with torch.no_grad():  # inference only, no gradients needed
        for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
            v_images = v_images.to(device='cuda', dtype=torch.float32)
            v_outputs_logits, cms = model1(v_images)
            b, c, h, w = v_outputs_logits.size()
            v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
            _, v_output = torch.max(v_outputs_logits, dim=1)
            v_outputs_noisy = []
            # Flatten probabilities to (b*h*w, c, 1) for per-pixel matrix products.
            v_outputs_flat = v_outputs_logits.view(b, c, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, 1)
            for cm in cms:
                b, c_r_d, h, w = cm.size()
                r = c_r_d // c // 2  # rank of the low-rank factorisation
                # The two factors are stacked along the channel axis; the last
                # channel (index c_r_d - 1) belongs to neither factor.
                # (The previous `if r == 1:` / `else:` had byte-identical branches.)
                cm1 = cm[:, 0:r * c, :, :]
                cm2 = cm[:, r * c:c_r_d - 1, :, :]
                cm1_reshape = cm1.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, r, c)
                cm2_reshape = cm2.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, r)
                # Normalise each factor so the reconstructed matrix has unit columns.
                cm1_reshape = cm1_reshape / cm1_reshape.sum(1, keepdim=True)
                cm2_reshape = cm2_reshape / cm2_reshape.sum(1, keepdim=True)
                # cm2 @ (cm1 @ p): rank-r approximation of the full confusion matrix.
                v_noisy_output = torch.bmm(cm2_reshape, torch.bmm(cm1_reshape, v_outputs_flat)).view(b * h * w, c)
                v_noisy_output = v_noisy_output.view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
                _, v_noisy_output = torch.max(v_noisy_output, dim=1)
                v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
            v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
            epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
            test_ged += generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
            test_dice += v_dice_
            test_dice_all.append(v_dice_)  # fixed: store the per-batch dice, not the running sum
    return test_dice / (i + 1), test_ged / (i + 1)
def evaluate_noisy_label_7(data, model1, model2, class_no, low_rank):
    """
    Evaluate a segmentation network (model1) plus a separate confusion-matrix
    network (model2), with either full-rank or low-rank pixel-wise confusion
    matrices.

    Args:
        data: iterable of (images, over-, under-, wrong-, good-labels, name) batches.
        model1: segmentation network producing class logits.
        model2: network producing one confusion matrix (or factor pair) per annotator.
        class_no: number of segmentation classes.
        low_rank: if True, each cm holds two stacked low-rank factors.

    Returns:
        Tuple of (mean dice over all batches, mean generalised energy distance).
        Previously the GED of only the *last* batch was returned.
    """
    model1.eval()
    model2.eval()
    test_dice = 0
    test_dice_all = []
    test_ged = 0
    with torch.no_grad():  # inference only, no gradients needed
        for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
            v_images = v_images.to(device='cuda', dtype=torch.float32)
            v_outputs_logits = model1(v_images)
            b, c, h, w = v_outputs_logits.size()
            v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
            cms = model2(v_images)
            _, v_output = torch.max(v_outputs_logits, dim=1)
            v_outputs_noisy = []
            # Flatten probabilities to (b*h*w, c, 1) for per-pixel matrix products.
            v_outputs_flat = v_outputs_logits.view(b, c, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, 1)
            for cm in cms:
                if low_rank is False:
                    # Full (c, c) matrix per pixel, column-normalised.
                    cm = cm.reshape(b, c ** 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, c)
                    cm = cm / cm.sum(1, keepdim=True)
                    v_noisy_output = torch.bmm(cm, v_outputs_flat).view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
                else:
                    b, c_r_d, h, w = cm.size()
                    r = c_r_d // c // 2  # rank of the factorisation
                    cm1 = cm[:, 0:r * c, :, :]
                    cm2 = cm[:, r * c:c_r_d, :, :]
                    cm1_reshape = cm1.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, r, c)
                    cm2_reshape = cm2.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, r)
                    cm1_reshape = cm1_reshape / cm1_reshape.sum(1, keepdim=True)
                    cm2_reshape = cm2_reshape / cm2_reshape.sum(1, keepdim=True)
                    # cm2 @ (cm1 @ p): rank-r approximation of the full confusion matrix.
                    v_noisy_output = torch.bmm(cm2_reshape, torch.bmm(cm1_reshape, v_outputs_flat)).view(b * h * w, c)
                    v_noisy_output = v_noisy_output.view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
                _, v_noisy_output = torch.max(v_noisy_output, dim=1)
                v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
            v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
            epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
            test_ged += generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
            test_dice += v_dice_
            test_dice_all.append(v_dice_)  # fixed: store the per-batch dice, not the running sum
    return test_dice / (i + 1), test_ged / (i + 1)
def evaluate_noisy_label_5(data, model1, class_no):
    """
    Evaluate a combined segmentation + confusion-matrix network on data that
    carries five annotations per image (over, under, wrong, good, true).

    Args:
        data: iterable of (images, over-, under-, wrong-, good-, true-labels, name) batches.
        model1: network returning (segmentation logits, list of pixel-wise confusion matrices).
        class_no: number of segmentation classes.

    Returns:
        Tuple of (mean dice over all batches, mean generalised energy distance).
        Previously the GED of only the *last* batch was returned.
    """
    model1.eval()
    test_dice = 0
    test_dice_all = []
    test_ged = 0
    with torch.no_grad():  # inference only, no gradients needed
        for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_labels_true, v_imagename) in enumerate(data):
            v_images = v_images.to(device='cuda', dtype=torch.float32)
            v_outputs_logits, cms = model1(v_images)
            b, c, h, w = v_outputs_logits.size()
            v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
            _, v_output = torch.max(v_outputs_logits, dim=1)
            v_outputs_noisy = []
            # Flatten probabilities to (b*h*w, c, 1) for per-pixel matrix products.
            v_outputs_flat = v_outputs_logits.view(b, c, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, 1)
            for cm in cms:
                # (b, c*c, h, w) -> one (c, c) matrix per pixel, column-normalised.
                cm = cm.reshape(b, c ** 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, c, c)
                cm = cm / cm.sum(1, keepdim=True)
                v_noisy_output = torch.bmm(cm, v_outputs_flat).view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
                _, v_noisy_output = torch.max(v_noisy_output, dim=1)
                v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
            v_dice_ = segmentation_scores(v_labels_true, v_output.cpu().detach().numpy(), class_no)
            epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_true.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
            test_ged += generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
            test_dice += v_dice_
            test_dice_all.append(v_dice_)  # fixed: store the per-batch dice, not the running sum
    return test_dice / (i + 1), test_ged / (i + 1)
def evaluate(evaluatedata, model, device, class_no):
    """
    Compute the mean segmentation score of ``model`` over an evaluation loader.

    Args:
        evaluatedata: iterable yielding (image, label, name) batches.
        model: segmentation network.
        device: torch device the model lives on.
        class_no: number of classes; 2 selects the sigmoid/threshold path.

    Returns:
        Mean segmentation score over all batches.
    """
    model.eval()
    running_score = 0
    with torch.no_grad():
        for batch_index, (image, label, name) in enumerate(evaluatedata):
            image = image.to(device=device, dtype=torch.float32)
            label = label.to(device=device, dtype=torch.float32)
            prediction = model(image)
            if class_no == 2:
                # Binary: threshold the sigmoid probability at 0.5.
                prediction = (torch.sigmoid(prediction) > 0.5).float()
            else:
                # Multi-class: take the argmax over the class dimension.
                prediction = torch.max(prediction, dim=1)[1]
            running_score += segmentation_scores(label.cpu().detach().numpy(), prediction.cpu().detach().numpy(), class_no)
    return running_score / (batch_index + 1)
def _colorize_prediction(prediction, class_no, h, w):
    """Map an (h, w) integer class map to an RGB uint8 image for visualisation."""
    if class_no == 2:
        palette = {1: (255, 0, 0)}
    elif class_no == 4:
        # multi class for brats 2018
        palette = {1: (255, 0, 0), 2: (0, 255, 0), 3: (0, 0, 255)}
    elif class_no == 8:
        # multi class for cityscapes
        palette = {0: (255, 0, 0),
                   1: (0, 255, 0),
                   2: (0, 0, 255),
                   3: (255, 255, 0),
                   4: (153, 51, 255),
                   5: (255, 102, 178),
                   6: (102, 255, 102)}
    else:
        palette = {}
    segmentation_map = np.zeros((h, w, 3), dtype=np.uint8)
    for class_value, colour in palette.items():
        segmentation_map[prediction == class_value] = colour
    return segmentation_map


def test(testdata,
         model,
         device,
         class_no,
         save_path):
    """
    Run the model on the test set, save a colour-coded segmentation map per
    image under ``save_path/Visual_results`` and the mean score under
    ``save_path/Quantitative_Results/test_result_data.txt``.

    Args:
        testdata: iterable yielding (image, label, name) batches of batch size 1.
        model: segmentation network.
        device: torch device the model lives on.
        class_no: number of classes; 2 selects the sigmoid/threshold path.
        save_path: output directory for visual and quantitative results.
    """
    model.eval()
    with torch.no_grad():
        test_iou = 0
        for j, (testimg, testlabel, testname) in enumerate(testdata):
            testimg = testimg.to(device=device, dtype=torch.float32)
            testlabel = testlabel.to(device=device, dtype=torch.float32)
            testoutput = model(testimg)
            if class_no == 2:
                testoutput = torch.sigmoid(testoutput)
                testoutput = (testoutput > 0.5).float()
            else:
                _, testoutput = torch.max(testoutput, dim=1)
            mean_iu_ = segmentation_scores(testlabel.cpu().detach().numpy(), testoutput.cpu().detach().numpy(), class_no)
            test_iou += mean_iu_
            # ========================================================
            # Plotting segmentation:
            # ========================================================
            prediction_map_path = os.path.join(save_path, 'Visual_results')
            try:
                os.mkdir(prediction_map_path)
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
            b, c, h, w = np.shape(testlabel)
            prediction = np.asarray(testoutput.cpu().detach().numpy(), dtype=np.uint8)
            prediction = np.squeeze(prediction, axis=0)
            if prediction.ndim == 3:
                # Binary output keeps a singleton channel axis; drop it so the
                # palette masks below are 2-D.
                prediction = prediction[0]
            # NOTE: the previous implementation called np.logical_and with three
            # positional arrays; the third array is np.logical_and's `out`
            # parameter, so the third condition was overwritten rather than
            # conjoined. Since all channels were identical copies, comparing a
            # single channel against each class value is the correct equivalent.
            segmentation_map = _colorize_prediction(prediction, class_no, h, w)
            prediction_name = 'seg_' + testname[0] + '.png'
            imageio.imsave(os.path.join(prediction_map_path, prediction_name), segmentation_map)
        prediction_result_path = os.path.join(save_path, 'Quantitative_Results')
        try:
            os.mkdir(prediction_result_path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        result_dictionary = {'Test dice': str(test_iou / len(testdata))}
        # Context manager guarantees the file is closed even on write errors.
        with open(os.path.join(prediction_result_path, 'test_result_data.txt'), 'w') as ff:
            ff.write(str(result_dictionary))
        print('Test iou: {:.4f}, '.format(test_iou / len(testdata)))
class CustomDataset_punet(torch.utils.data.Dataset):
    def __init__(self, dataset_location, dataset_tag, noisylabel, augmentation=False):
        """Configure folder paths for one of several noisy-label dataset layouts.

        Args:
            dataset_location: root directory containing the label/image folders.
            dataset_tag: dataset family ('mnist', 'brats' or 'lidc').
            noisylabel: label scheme — 'multi' (several noisy annotators),
                'binary', 'normal' or 'p_unet'.
            augmentation: whether data augmentation is applied in __getitem__.

        NOTE(review): unknown (noisylabel, dataset_tag) combinations silently
        leave the folder attributes unset, which will fail later with an
        AttributeError in __getitem__ — consider raising here instead.
        """
        self.label_mode = noisylabel
        self.dataset_tag = dataset_tag
        #
        if noisylabel == 'multi':
            # Four annotations per image (over-, under-, wrong-segmented, reference).
            if dataset_tag == 'mnist':
                self.label_over_folder = dataset_location + '/Over'
                self.label_under_folder = dataset_location + '/Under'
                self.label_wrong_folder = dataset_location + '/Wrong'
                self.label_good_folder = dataset_location + '/GT'
                self.image_folder = dataset_location + '/Gaussian'
            elif dataset_tag == 'brats':
                self.label_over_folder = dataset_location + '/Over'
                self.label_under_folder = dataset_location + '/Under'
                self.label_wrong_folder = dataset_location + '/Wrong'
                self.label_good_folder = dataset_location + '/Good'
                self.image_folder = dataset_location + '/Image'
            elif dataset_tag == 'lidc':
                # LIDC uses five real annotators rather than synthetic noise.
                self.label_over_folder = dataset_location + '/Annotator_1'
                self.label_under_folder = dataset_location + '/Annotator_2'
                self.label_wrong_folder = dataset_location + '/Annotator_3'
                self.label_good_folder = dataset_location + '/Annotator_4'
                self.label_true_folder = dataset_location + '/Annotator_5'
                self.image_folder = dataset_location + '/Image'
            #
        elif noisylabel == 'binary':
            if dataset_tag == 'mnist':
                self.label_folder = dataset_location + '/Mean'
                self.image_folder = dataset_location + '/Gaussian'
                self.true_label_folder = dataset_location + '/GT'
        elif noisylabel == 'normal':
            if dataset_tag == 'mnist':
                self.label_folder = dataset_location + '/GT'
                self.image_folder = dataset_location + '/Gaussian'
        elif noisylabel == 'p_unet':
            if dataset_tag == 'mnist':
                self.label_folder = dataset_location + '/All'
                self.image_folder = dataset_location + '/Gaussian'
        self.data_aug = augmentation
def __getitem__(self, index):
if self.label_mode == 'multi':
#
if self.dataset_tag == 'mnist' or self.dataset_tag == 'brats':
#
all_labels_over = glob.glob(os.path.join(self.label_over_folder, '*.tif'))
all_labels_over.sort()
#
all_labels_under = glob.glob(os.path.join(self.label_under_folder, '*.tif'))
all_labels_under.sort()
#
all_labels_wrong = glob.glob(os.path.join(self.label_wrong_folder, '*.tif'))
all_labels_wrong.sort()
#
all_labels_good = glob.glob(os.path.join(self.label_good_folder, '*.tif'))
all_labels_good.sort()
#
all_images = glob.glob(os.path.join(self.image_folder, '*.tif'))
all_images.sort()
#
label_over = tiff.imread(all_labels_over[index])
label_over = np.array(label_over, dtype='float32')
#
label_under = tiff.imread(all_labels_under[index])
label_under = np.array(label_under, dtype='float32')
#
label_wrong = tiff.imread(all_labels_wrong[index])
label_wrong = np.array(label_wrong, dtype='float32')
#
label_good = tiff.imread(all_labels_good[index])
label_good = np.array(label_good, dtype='float32')
#
image = tiff.imread(all_images[index])
image = np.array(image, dtype='float32')
#
# dim_length = len(np.shape(label_over))
label_over[label_over == 4.0] = 3.0
label_wrong[label_wrong == 4.0] = 3.0
label_good[label_good == 4.0] = 3.0
label_under[label_under == 4.0] = 3.0
if self.dataset_tag == 'mnist':
label_over = np.where(label_over > 0.5, 1.0, 0.0)
label_under = np.where(label_under > 0.5, 1.0, 0.0)
label_wrong = np.where(label_wrong > 0.5, 1.0, 0.0)
if np.amax(label_good) != 1.0:
# sometimes, some preprocessing might give it as 0 - 255 range
label_good = np.where(label_good > 10.0, 1.0, 0.0)
else:
assert np.amax(label_good) == 1.0
label_good = np.where(label_good > 0.5, 1.0, 0.0)
# print(np.unique(label_over))
# label_over: h x w
# image: h x w x c
c_amount = len(np.shape(label_over))
# Reshaping everyting to make sure the order: channel x height x width
if c_amount == 3:
#
d1, d2, d3 = np.shape(label_over)
#
if d1 != min(d1, d2, d3):
#
assert d3 == min(d1, d2, d3)
#
label_over = np.transpose(label_over, (2, 0, 1))
label_under = np.transpose(label_under, (2, 0, 1))
label_wrong = np.transpose(label_wrong, (2, 0, 1))
label_good = np.transpose(label_good, (2, 0, 1))
#
elif c_amount == 2:
#
label_over = | np.expand_dims(label_over, axis=0) | numpy.expand_dims |
import os
import copy
import glob
import numpy as np
from gains import Absorber
import corner
from utils import (fit_2d_gmm, vcomplex, nested_ddict, make_ellipses,
baselines_2_ants, find_outliers_2d_mincov,
find_outliers_2d_dbscan, find_outliers_dbscan, fit_kde,
fit_2d_kde, hdi_of_mcmc, hdi_of_sample, bc_endpoint, ants_2_baselines)
import matplotlib
from uv_data import UVData
from from_fits import create_model_from_fits_file
from model import Model
from spydiff import import_difmap_model, modelfit_difmap
from spydiff import modelfit_difmap
matplotlib.use('Agg')
label_size = 12
matplotlib.rcParams['xtick.labelsize'] = label_size
matplotlib.rcParams['ytick.labelsize'] = label_size
def xy_2_rtheta(params):
    """Convert leading (flux, x, y) parameters to (flux, r, theta[deg]).

    Any trailing parameters (e.g. component sizes) are passed through unchanged.

    :param params:
        Sequence whose first three entries are flux, x and y.
    :return:
        List [flux, r, theta_deg] + remaining parameters.
    """
    flux, x, y = params[:3]
    r = np.hypot(x, y)
    # arctan2 handles y == 0 without dividing by zero and keeps the correct
    # quadrant, unlike the previous arctan(x / y).
    theta = np.rad2deg(np.arctan2(x, y))
    # A slice never raises IndexError, so the old try/except was dead code.
    return [flux, r, theta] + list(params[3:])
def boot_ci(boot_images, original_image, cred_mass=0.68, kind=None):
    """
    Calculate bootstrap CI.
    :param boot_images:
        Iterable of 2D numpy arrays with bootstrapped images.
    :param original_image:
        2D numpy array with original image.
    :param cred_mass: (optional)
        Credible-interval mass, float in (0, 1). (default: ``0.68``)
    :param kind: (optional)
        Type of CI. "asym", "bc" or None. If ``None`` than symmetric one.
        (default: ``None``)
    :return:
        Two numpy arrays with low and high CI borders for each pixel.
    """
    # Stack the bootstrap realisations along a third axis: (ny, nx, n_boot).
    images_cube = np.dstack(boot_images)
    # NOTE(review): the local ``boot_ci`` shadows this function's name — it
    # holds the per-pixel interval *width*, not the function.
    boot_ci = np.zeros(np.shape(images_cube[:, :, 0]))
    mean_boot = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_0 = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_1 = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_low = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_high = np.zeros(np.shape(images_cube[:, :, 0]))
    alpha = 1 - cred_mass
    print("calculating CI intervals")
    if kind == "bc":
        # Bias-corrected percentile endpoints, computed per pixel.
        for (x, y), value in np.ndenumerate(boot_ci):
            hdi_low[x, y] = bc_endpoint(images_cube[x, y, :], original_image[x, y], alpha/2.)
            hdi_high[x, y] = bc_endpoint(images_cube[x, y, :], original_image[x, y], 1-alpha/2.)
    else:
        # Highest-density interval of the bootstrap sample at each pixel.
        for (x, y), value in np.ndenumerate(boot_ci):
            hdi = hdi_of_sample(images_cube[x, y, :], cred_mass=cred_mass)
            boot_ci[x, y] = hdi[1] - hdi[0]
            hdi_0[x, y] = hdi[0]
            hdi_1[x, y] = hdi[1]
            mean_boot[x, y] = np.mean(images_cube[x, y, :])
        if kind == 'asym':
            # Asymmetric CI: recentre the HDI on the original image using the
            # bootstrap mean as the reference point.
            hdi_low = original_image - (mean_boot - hdi_0)
            hdi_high = original_image + hdi_1 - mean_boot
        else:
            # Symmetric CI: half the HDI width on each side of the original.
            hdi_low = original_image - boot_ci / 2.
            hdi_high = original_image + boot_ci / 2.
    return hdi_low, hdi_high
def analyze_bootstrap_samples(dfm_model_fname, booted_mdl_paths,
dfm_model_dir=None, plot_comps=None,
plot_file=None, txt_file=None, cred_mass=0.68,
coordinates='xy', out_samples_path=None,
limits=None, fig=None):
"""
Plot bootstrap distribution of model component parameters.
:param dfm_model_fname:
File name of original difmap model.
:param booted_mdl_paths:
Iterable of paths to bootstrapped difmap models.
:param dfm_model_dir: (optional)
Directory with original difmap model. If ``None`` then CWD. (default:
``None``)
:param plot_comps: (optional)
Iterable of components number to plot on same plot. If ``None`` then
plot parameter distributions of all components.
:param plot_file: (optional)
File to save picture. If ``None`` then don't save picture. (default:
``None``)
:param txt_file: (optional)
File to save credible intervals for parameters. If ``None`` then don't
save credible intervals. (default: ``None``)
:param cred_mass: (optional)
Value of credible interval mass. Float in range (0., 1.). (default:
``0.68``)
:param coordinates: (optional)
Type of coordinates to use. ``xy`` or ``rtheta``. (default: ``xy``)
"""
n_boot = len(booted_mdl_paths)
# Get params of initial model used for bootstrap
comps_orig = import_difmap_model(dfm_model_fname, dfm_model_dir)
comps_params0 = {i: [] for i in range(len(comps_orig))}
for i, comp in enumerate(comps_orig):
# FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
if coordinates == 'xy':
params = comp.p
elif coordinates == 'rtheta':
params = xy_2_rtheta(comp.p)
else:
raise Exception
comps_params0[i].extend(list(params))
# Load bootstrap models
comps_params = {i: [] for i in range(len(comps_orig))}
for booted_mdl_path in booted_mdl_paths:
path, booted_mdl_file = os.path.split(booted_mdl_path)
comps = import_difmap_model(booted_mdl_file, path)
for i, comp in enumerate(comps):
# FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
if coordinates == 'xy':
params = comp.p
elif coordinates == 'rtheta':
params = xy_2_rtheta(comp.p)
else:
raise Exception
comps_params[i].extend(list(params))
comps_to_plot = [comps_orig[k] for k in plot_comps]
# (#boot, #parameters)
boot_data = np.hstack( | np.array(comps_params[i]) | numpy.array |
# -*- coding: utf-8 -*-
# @Time : 2021/2/10 11:24 上午
# @Author : <NAME>
# @fileName: SaDE.py
# @Software: PyCharm
# @Blog :https://lesliewongcv.github.io/
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
import DifferenceEvolution as DE
import GenericAlgorithm as GA
NP = 50  # sub-population size per mutation strategy (4 strategies -> 4*NP individuals)
D = 10  # problem dimensionality
CRm = np.random.normal(0.5, 0.1)  # initial mean crossover rate, adapted during the run
X = np.arange(-5, 5, 0.1)  # grid for plotting the 1-D objective
Y = X ** 2  # objective values on the grid (sphere function)
ITER = 100  # number of outer generations
rep_set = np.zeros(4)  # cumulative per-strategy replacement counts
LP = 2  # learning period: generations between strategy-probability updates
# ITER = 3
def rand_1_mutation(popu_):
    """DE/rand/1 mutation: v = x_r3 + F * (x_r1 - x_r2).

    ``F`` is re-sampled from N(0.5, 0.3) for every target vector. Uses the
    module-level constants ``NP`` and ``D``.
    """
    offspringSet = np.zeros((NP, D))
    for j in range(NP):
        f = np.random.normal(0.5, 0.3)
        # Remove the target vector, then shuffle indices to draw distinct donors.
        sub_popu = np.delete(popu_, j, axis=0)
        rand_sub = np.arange(sub_popu.shape[0])
        np.random.shuffle(rand_sub)
        offspringSet[j] = sub_popu[rand_sub[4]] + f * (sub_popu[rand_sub[0]] - sub_popu[rand_sub[1]])
    return offspringSet
def rand2best_2_mutation(popu_):
    """DE mutation with two difference vectors: v = x_r5 + F*(x_r1-x_r2) + F*(x_r3-x_r4).

    NOTE(review): despite the name, no *best* vector is used here — the base
    vector is a random donor; confirm this matches the intended SaDE strategy.
    """
    offspringSet = np.zeros((NP, D))
    for k in range(NP):
        f = np.random.normal(0.5, 0.3)
        # Remove the target vector, then shuffle indices to draw distinct donors.
        sub_popu = np.delete(popu_, k, axis=0)
        rand_sub = np.arange(sub_popu.shape[0])
        np.random.shuffle(rand_sub)
        offspringSet[k] = sub_popu[rand_sub[4]] + f * (sub_popu[rand_sub[0]] - sub_popu[rand_sub[1]]) \
                          + f * (sub_popu[rand_sub[2]] - sub_popu[rand_sub[3]])
    return offspringSet
def cur2rand_1_mutation(popu_):
    """DE/current-to-rand/1 mutation: v = x_i + F*(x_r1 - x_i) + F*(x_r2 - x_r3).

    ``F`` is re-sampled from N(0.5, 0.3) for every target vector.
    """
    offspringSet = np.zeros((NP, D))
    for l in range(NP):
        f = np.random.normal(0.5, 0.3)
        cur = popu_[l]  # the current (target) vector x_i
        sub_popu = np.delete(popu_, l, axis=0)
        rand_sub = np.arange(sub_popu.shape[0])
        np.random.shuffle(rand_sub)
        offspringSet[l] = cur + f * (sub_popu[rand_sub[0]] - cur) \
                          + f * (sub_popu[rand_sub[1]] - sub_popu[rand_sub[2]])
    return offspringSet
def rand_2_mutation(popu_):
    """DE/rand/2 mutation: v = x_r1 + F*(x_r2 - x_r3) + F*(x_r4 - x_r5).

    ``F`` is re-sampled from N(0.5, 0.3) for every target vector.
    """
    offspringSet = np.zeros((NP, D))
    for i in range(NP):
        f = np.random.normal(0.5, 0.3)
        # Remove the target vector, then shuffle indices to draw distinct donors.
        sub_popu = np.delete(popu_, i, axis=0)
        rand_sub = np.arange(sub_popu.shape[0])
        np.random.shuffle(rand_sub)
        offspringSet[i] = sub_popu[rand_sub[0]] + f * (sub_popu[rand_sub[1]] - sub_popu[rand_sub[2]]) \
                          + f * (sub_popu[rand_sub[3]] - sub_popu[rand_sub[4]])
    return offspringSet
def crossover(popu, off_v, NP, D, CRm):
    """Binomial crossover between the population and its mutant vectors.

    Args:
        popu: parent population, shape (4*NP, D) (four strategies of NP each).
        off_v: mutant vectors, same shape as ``popu``.
        NP: sub-population size per strategy.
        D: problem dimensionality.
        CRm: mean of the normally distributed per-gene crossover rates.

    Returns:
        Tuple (trial population ``off_u``, sampled crossover-rate matrix ``Cr``).
    """
    off_u = np.copy(popu)
    Cr = np.random.normal(CRm, 0.1, (4 * NP, D))  # CRi = N(CRm, 0.1), per gene
    take_mutant = np.random.rand(4 * NP, D) <= Cr
    off_u[take_mutant] = off_v[take_mutant]
    # Guarantee every trial vector inherits at least one gene from its mutant
    # (the standard DE j_rand rule). The previous code copied *parent* genes
    # back for a whole column vector per row (`off_u[i, rand_j]` with rand_j a
    # length-NP array), which could undo the crossover entirely and only
    # touched the first NP rows.
    rand_j = np.random.randint(0, D, size=4 * NP)
    rows = np.arange(4 * NP)
    off_u[rows, rand_j] = off_v[rows, rand_j]
    return off_u, Cr
def selection(off_u, popu):
    """Greedy DE selection plus per-strategy replacement bookkeeping.

    Each trial vector replaces its parent when its fitness is at least as
    good. Rows [0, NP), [NP, 2*NP), [2*NP, 3*NP) and [3*NP, 4*NP) belong to
    mutation strategies 1..4 respectively.

    Args:
        off_u: trial population, shape (4*NP, D).
        popu: parent population, same shape.

    Returns:
        Tuple (new population, per-strategy replacement counts (length 4),
        total number of replacements, index mask of replaced rows).
    """
    popu_fitness = DE.fitness_score(popu, 4 * NP, D)
    mask = np.where(DE.fitness_score(off_u, 4 * NP, D) <= popu_fitness)
    off_final = np.copy(popu)
    off_final[mask] = off_u[mask]
    replace_num = len(mask[0])
    # Bucket replaced rows by strategy. The previous code used `<= 50`,
    # `<= 100`, `<= 150`, which assigned each boundary index (50, 100, 150) to
    # the wrong strategy; strict `<` with NP multiples is correct and no
    # longer hard-codes NP = 50.
    rep = np.zeros(4)
    rep[0] = np.sum(mask[0] < NP)
    rep[1] = np.sum(mask[0] < 2 * NP) - rep[0]
    rep[2] = np.sum(mask[0] < 3 * NP) - rep[1] - rep[0]
    rep[3] = replace_num - rep[2] - rep[1] - rep[0]
    return off_final, rep, replace_num, mask
# f = np.random.normal(0.5, 0.3) # randomly sampled from normal distribution and applied to EACH TARGET VECTOR
p = np.random.uniform(0, 1)
p1 = 0.25
p2 = 0.5
p3 = 0.75
popu, popu_ft = DE.init_popu(4 * NP, D)
CRtmp_mean = 0
for i in range(ITER):
for i in range(LP):
off_v1 = rand_1_mutation(popu[:50])
off_v2 = rand2best_2_mutation(popu[50:100])
off_v3 = cur2rand_1_mutation(popu[100:150])
off_v4 = rand_2_mutation(popu[150:])
off_v = np.concatenate((off_v1, off_v2, off_v3, off_v4))
off_u, CRi = crossover(popu, off_v, NP, D, CRm)
off_final, replace, replace_num, replace_CR = selection(off_u, popu)
rep_set += replace
CRtmp_mean += np.mean(CRi[replace_CR])
_ = 2 + 2
p1 = (replace[0]) / replace_num
p2 = (replace[0] + replace[1]) / replace_num
p3 = (replace[0] + replace[1] + replace[2]) / replace_num
p = | np.random.uniform(0, 1) | numpy.random.uniform |
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import pytest
import os
import shutil
import numpy as np
import sys
from collections import OrderedDict as odict
import copy
import glob
from pyuvdata import UVData
from pyuvdata import utils as uvutils
import unittest
from scipy import stats
from scipy import constants
from pyuvdata import UVFlag
from .. import datacontainer, io, frf
from ..data import DATA_PATH
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
def test_timeavg_waterfall():
    """Exercise frf.timeavg_waterfall on real miriad data: basic averaging,
    rephasing, various averaging lengths, LST wrapping, argument validation
    and the nsample weighting modes."""
    fname = os.path.join(DATA_PATH, "zen.2458042.12552.xx.HH.uvXA")
    uvd = UVData()
    uvd.read_miriad(fname)
    d = uvd.get_data(24, 25)
    f = uvd.get_flags(24, 25)
    n = uvd.get_nsamples(24, 25)
    t = np.unique(uvd.time_array)
    fr = uvd.freq_array.squeeze()
    # Collect the unique LSTs in their original order (np.unique would sort).
    lsts = []
    for _l in uvd.lst_array:
        if _l not in lsts:
            lsts.append(_l)
    lsts = np.array(lsts)
    antpos, ants = uvd.get_ENU_antpos()
    blv = antpos[ants.tolist().index(24)] - antpos[ants.tolist().index(25)]
    # test basic execution
    ad, af, an, al, aea = frf.timeavg_waterfall(d, 25, verbose=False)
    assert ad.shape == (3, 64)
    assert af.shape == (3, 64)
    assert an.shape == (3, 64)
    assert not np.any(af)
    assert np.allclose(an[1, 0], 25.0)
    assert np.allclose(an[2, 0], 10.0)
    # test rephase
    ad, af, an, al, aea = frf.timeavg_waterfall(d, 25, flags=f, rephase=True, lsts=lsts, freqs=fr, bl_vec=blv,
                                                nsamples=n, extra_arrays=dict(times=t), verbose=False)
    assert ad.shape == (3, 64)
    assert af.shape == (3, 64)
    assert an.shape == (3, 64)
    assert np.any(af)
    assert len(al) == 3
    assert len(aea['avg_times']) == 3
    assert np.allclose(an.max(), 25.0)
    # test various Navgs
    ad, af, an, al, aea = frf.timeavg_waterfall(d, 1, flags=f, rephase=True, lsts=lsts, freqs=fr, bl_vec=blv,
                                                nsamples=n, extra_arrays=dict(times=t), verbose=False)
    assert ad.shape == (60, 64)
    ad, af, an, al, aea = frf.timeavg_waterfall(d, 60, flags=f, rephase=True, lsts=lsts, freqs=fr, bl_vec=blv,
                                                nsamples=n, extra_arrays=dict(times=t), verbose=False)
    assert ad.shape == (1, 64)
    # wrap lst
    ad2, af2, an2, al2, aea2 = frf.timeavg_waterfall(d, 60, flags=f, rephase=True, lsts=lsts + 1.52917804, freqs=fr, bl_vec=blv,
                                                     nsamples=n, extra_arrays=dict(times=t), verbose=False)
    assert ad.shape == (1, 64)
    # wrapped-LST averaging must give the same data, just offset LSTs
    assert np.allclose(ad, ad2)
    assert np.allclose(al, al2 - 1.52917804)
    # Test Error
    with pytest.raises(ValueError):
        frf.timeavg_waterfall(d, 25, verbose=False, wgt_by_nsample=True, wgt_by_favg_nsample=True)
    # test weightings
    d = np.ones((4, 10))
    d[0, :] *= 2
    n = np.ones((4, 10))
    n[0, 0:5] *= 2
    ad, _, _, _, _ = frf.timeavg_waterfall(d, 2, rephase=False, nsamples=n, wgt_by_nsample=True)
    np.testing.assert_array_equal(ad[1, :], 1.0)
    np.testing.assert_array_equal(ad[0, 0:5], 5. / 3)
    np.testing.assert_array_equal(ad[0, 5:10], 1.5)
    ad, _, _, _, _ = frf.timeavg_waterfall(d, 2, rephase=False, nsamples=n, wgt_by_nsample=False, wgt_by_favg_nsample=True)
    np.testing.assert_array_equal(ad[1, :], 1.0)
    np.testing.assert_array_equal(ad[0, :], 1.6)
def test_fir_filtering():
# convert a high-pass frprofile to an FIR filter
frbins = np.linspace(-40e-3, 40e-3, 1024)
frp = np.ones(1024)
frp[512 - 9:512 + 10] = 0.0
fir, tbins = frf.frp_to_fir(frp, delta_bin=np.diff(frbins)[0])
# confirm its purely real
assert not np.any(np.isclose(np.abs(fir.real), 0.0))
assert np.allclose(np.abs(fir.imag), 0.0)
# convert back
_frp, _frbins = frf.frp_to_fir(fir, delta_bin=np.diff(tbins)[0], undo=True)
np.testing.assert_array_almost_equal(frp, _frp.real)
np.testing.assert_array_almost_equal(np.diff(frbins), np.diff(_frbins))
assert np.allclose(np.abs(_frp.imag), 0.0)
# test noise averaging properties
frp = np.zeros(1024)
frp[512] = 1.0
t_ratio = frf.fr_tavg(frp)
assert np.allclose(t_ratio, 1024)
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
class Test_FRFilter(object):
def setup_method(self):
self.fname = os.path.join(DATA_PATH, "zen.2458042.12552.xx.HH.uvXA")
self.F = frf.FRFilter(self.fname, filetype='miriad')
self.F.read()
def test_timeavg_data(self):
# test basic time average
self.F.timeavg_data(self.F.data, self.F.times, self.F.lsts, 35, rephase=True, keys=[(24, 25, 'ee')])
assert self.F.Navg == 3
assert len(self.F.avg_data) == 1
assert self.F.avg_data[(24, 25, 'ee')].shape == (20, 64)
# test full time average and overwrite
self.F.timeavg_data(self.F.data, self.F.times, self.F.lsts, 1e10, rephase=True, verbose=False, overwrite=False)
assert self.F.Navg == 60
assert len(self.F.avg_data) == 28
assert self.F.avg_data[(24, 25, 'ee')].shape == (20, 64)
assert self.F.avg_data[(24, 37, 'ee')].shape == (1, 64)
# test weight by nsample
F = copy.deepcopy(self.F)
k = (24, 25, 'ee')
F.nsamples[k][:3] = 0.0
F.timeavg_data(F.data, F.times, F.lsts, 35, nsamples=F.nsamples, keys=[k], overwrite=True,
wgt_by_nsample=True)
assert np.all(np.isclose(F.avg_data[k][0], 0.0)) # assert data is zero b/c I zeroed nsample
assert np.all(np.isclose(F.avg_nsamples[k][0], 0.0)) # assert avg_nsample is also zero
assert np.all(np.isclose(F.avg_nsamples[k][1:], 3.0)) # assert non-zeroed nsample is 3
# repeat without nsample wgt
F.timeavg_data(F.data, F.times, F.lsts, 35, nsamples=F.nsamples, keys=[k], overwrite=True,
wgt_by_nsample=False)
assert not np.any(np.isclose(F.avg_data[k][0, 5:-5], 0.0)) # assert non-edge data is now not zero
assert np.all(np.isclose(F.avg_nsamples[k][0], 0.0)) # avg_nsample should still be zero
# exceptions
pytest.raises(AssertionError, self.F.timeavg_data, self.F.data, self.F.times, self.F.lsts, 1.0)
def test_filter_data(self):
# construct high-pass filter
frates = np.fft.fftshift(np.fft.fftfreq(self.F.Ntimes, self.F.dtime)) * 1e3
w = np.ones((self.F.Ntimes, self.F.Nfreqs), dtype=np.float)
w[np.abs(frates) < 20] = 0.0
frps = datacontainer.DataContainer(dict([(k, w) for k in self.F.data]))
# make gaussian random noise
bl = (24, 25, 'ee')
window = 'blackmanharris'
ec = 0
np.random.seed(0)
self.F.data[bl] = np.reshape(stats.norm.rvs(0, 1, self.F.Ntimes * self.F.Nfreqs)
+ 1j * stats.norm.rvs(0, 1, self.F.Ntimes * self.F.Nfreqs), (self.F.Ntimes, self.F.Nfreqs))
# fr filter noise
self.F.filter_data(self.F.data, frps, overwrite=True, verbose=False, axis=0, keys=[bl])
# check key continue w/ ridiculous edgecut
self.F.filter_data(self.F.data, frps, overwrite=False, verbose=False, keys=[bl], edgecut_low=100, axis=0)
# fft
self.F.fft_data(data=self.F.data, assign='dfft', ax='freq', window=window, edgecut_low=ec, edgecut_hi=ec, overwrite=True)
self.F.fft_data(data=self.F.filt_data, assign='rfft', ax='freq', window=window, edgecut_low=ec, edgecut_hi=ec, overwrite=True)
# ensure drop in noise power is reflective of frf_nsamples
dfft = np.mean(np.abs(self.F.dfft[bl]), axis=0)
rfft = np.mean(np.abs(self.F.rfft[bl]), axis=0)
r = np.mean(dfft / rfft)
assert np.allclose(r, np.sqrt(np.mean(self.F.filt_nsamples[bl])), atol=1e-1)
def test_write_data(self):
self.F.timeavg_data(self.F.data, self.F.times, self.F.lsts, 35, rephase=False, verbose=False)
self.F.write_data(self.F.avg_data, "./out.uv", filetype='miriad', overwrite=True,
add_to_history='testing', times=self.F.avg_times, lsts=self.F.avg_lsts)
assert os.path.exists("./out.uv")
hd = io.HERAData('./out.uv', filetype='miriad')
hd.read()
assert 'testing' in hd.history.replace('\n', '').replace(' ', '')
assert 'Thisfilewasproducedbythefunction' in hd.history.replace('\n', '').replace(' ', '')
shutil.rmtree("./out.uv")
pytest.raises(AssertionError, self.F.write_data, self.F.avg_data, "./out.uv", times=self.F.avg_times)
pytest.raises(ValueError, self.F.write_data, self.F.data, "hi", filetype='foo')
def test_time_avg_data_and_write(self, tmpdir):
# time-averaged data written too file will be compared to this.
tmp_path = tmpdir.strpath
output = tmp_path + '/test_output.miriad'
flag_output = tmp_path + '/test_output.flags.h5'
self.F.timeavg_data(self.F.data, self.F.times, self.F.lsts, 35., rephase=True, overwrite=True,
wgt_by_nsample=True, flags=self.F.flags, nsamples=self.F.nsamples)
frf.time_avg_data_and_write(self.fname, output, t_avg=35., rephase=True, wgt_by_nsample=True, flag_output=flag_output, filetype='miriad')
data_out = frf.FRFilter(output, filetype='miriad')
data_out.read()
for k in data_out.data:
assert np.allclose(data_out.data[k], self.F.avg_data[k])
assert np.allclose(data_out.flags[k], self.F.avg_flags[k])
assert np.allclose(data_out.nsamples[k], self.F.avg_nsamples[k])
def test_time_avg_data_and_write_baseline_list(self, tmpdir):
# compare time averaging over baseline list versus time averaging
# without baseline list.
tmp_path = tmpdir.strpath
uvh5s = sorted(glob.glob(DATA_PATH + '/zen.2458045.*.uvh5'))
output_files = []
for file in uvh5s:
baseline_list = io.baselines_from_filelist_position(file, uvh5s)
output = tmp_path + '/' + file.split('/')[-1]
output_files.append(output)
output_flags = tmp_path + '/' + file.split('/')[-1].replace('.uvh5', '.flags.h5')
with pytest.warns(RuntimeWarning):
frf.time_avg_data_and_write(baseline_list=[], flag_output=output_flags,
input_data_list=uvh5s, rephase=True,
output_data=output, t_avg=35., wgt_by_nsample=True)
frf.time_avg_data_and_write(baseline_list=baseline_list, flag_output=output_flags,
input_data_list=uvh5s, rephase=True,
output_data=output, t_avg=35., wgt_by_nsample=True)
# now do everything at once:
output = tmp_path + '/combined.uvh5'
frf.time_avg_data_and_write(uvh5s, output, t_avg=35., rephase=True, wgt_by_nsample=True)
data_out = frf.FRFilter(output)
data_out_bls = frf.FRFilter(output_files)
data_out.read()
data_out_bls.read()
# check that data, flags, nsamples are all close.
for k in data_out.data:
assert np.all(np.isclose(data_out.data[k], data_out_bls.data[k]))
assert np.all(np.isclose(data_out.flags[k], data_out_bls.flags[k]))
assert np.all(np.isclose(data_out.nsamples[k], data_out_bls.nsamples[k]))
def test_time_average_argparser_multifile(self):
sys.argv = [sys.argv[0], "first.uvh5", "second.uvh5", "output.uvh5", "--cornerturnfile", "input.uvh5", "--t_avg", "35.", "--rephase"]
ap = frf.time_average_argparser()
args = ap.parse_args()
assert args.cornerturnfile == "input.uvh5"
assert args.output_data == "output.uvh5"
assert args.input_data_list == ['first.uvh5', 'second.uvh5']
assert args.t_avg == 35.
assert not args.clobber
assert not args.verbose
assert args.flag_output is None
assert args.filetype == "uvh5"
def test_tophat_frfilter(self):
fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
k = (24, 25, 'ee')
frfil = frf.FRFilter(fname, filetype='miriad')
frfil.read(bls=[k])
bl = np.linalg.norm(frfil.antpos[24] - frfil.antpos[25]) / constants.c * 1e9
sdf = (frfil.freqs[1] - frfil.freqs[0]) / 1e9
frfil.tophat_frfilter(tol=1e-2, output_prefix='frfiltered')
for k in frfil.data.keys():
assert frfil.frfiltered_resid[k].shape == (60, 64)
assert frfil.frfiltered_model[k].shape == (60, 64)
assert k in frfil.frfiltered_info
# test skip_wgt imposition of flags
fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
k = (24, 25, 'ee')
# check successful run when avg_red_bllens is True and when False.
for avg_red_bllens in [True, False]:
frfil = frf.FRFilter(fname, filetype='miriad')
frfil.read(bls=[k])
if avg_red_bllens:
frfil.avg_red_baseline_vectors()
wgts = {k: np.ones_like(frfil.flags[k], dtype=np.float)}
wgts[k][:, 0] = 0.0
frfil.tophat_frfilter(keys=[k], wgts=wgts, tol=1e-5, window='blackman-harris', skip_wgt=0.1, maxiter=100)
assert frfil.clean_info[k][(0, frfil.Nfreqs)]['status']['axis_0'][0] == 'skipped'
np.testing.assert_array_equal(frfil.clean_flags[k][:, 0], np.ones_like(frfil.flags[k][:, 0]))
np.testing.assert_array_equal(frfil.clean_model[k][:, 0], np.zeros_like(frfil.clean_resid[k][:, 0]))
np.testing.assert_array_equal(frfil.clean_resid[k][:, 0], np.zeros_like(frfil.clean_resid[k][:, 0]))
def test_load_tophat_frfilter_and_write_baseline_list(self, tmpdir):
tmp_path = tmpdir.strpath
uvh5 = [os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5"),
os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5")]
cals = [os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only.part1"),
os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only.part2")]
outfilename = os.path.join(tmp_path, 'temp.h5')
cdir = os.path.join(tmp_path, 'cache_temp')
# make a cache directory
if os.path.isdir(cdir):
shutil.rmtree(cdir)
os.mkdir(cdir)
# test graceful exit with baseline list length of zero.
with pytest.warns(RuntimeWarning):
frf.load_tophat_frfilter_and_write(datafile_list=uvh5, baseline_list=[],
calfile_list=cals, spw_range=[100, 200], cache_dir=cdir,
read_cache=True, write_cache=True, avg_red_bllens=True,
res_outfilename=outfilename, clobber=True,
mode='dayenu')
for avg_bl in [True, False]:
frf.load_tophat_frfilter_and_write(datafile_list=uvh5, baseline_list=[(53, 54)], polarizations=['ee'],
calfile_list=cals, spw_range=[100, 200], cache_dir=cdir,
read_cache=True, write_cache=True, avg_red_bllens=avg_bl,
res_outfilename=outfilename, clobber=True,
mode='dayenu')
hd = io.HERAData(outfilename)
d, f, n = hd.read()
assert len(list(d.keys())) == 1
assert d[(53, 54, 'ee')].shape[1] == 100
assert d[(53, 54, 'ee')].shape[0] == 60
# now do no spw range and no cal files just to cover those lines.
frf.load_tophat_frfilter_and_write(datafile_list=uvh5, baseline_list=[(53, 54)], polarizations=['ee'],
cache_dir=cdir,
read_cache=True, write_cache=True, avg_red_bllens=avg_bl,
res_outfilename=outfilename, clobber=True,
mode='dayenu')
hd = io.HERAData(outfilename)
d, f, n = hd.read()
assert len(list(d.keys())) == 1
assert d[(53, 54, 'ee')].shape[1] == 1024
assert d[(53, 54, 'ee')].shape[0] == 60
# now test flag factorization and time thresholding.
# prepare an input files for broadcasting flags
uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
input_file = os.path.join(tmp_path, 'temp_special_flags.h5')
shutil.copy(uvh5, input_file)
hd = io.HERAData(input_file)
_, flags, _ = hd.read()
ntimes_before = hd.Ntimes
nfreqs_before = hd.Nfreqs
freqs_before = hd.freqs
times_before = hd.times
for bl in flags:
flags[bl][:] = False
flags[bl][0, :hd.Nfreqs // 2] = True # first time has 50% flagged
flags[bl][-3:, -1] = True # last channel has flags for three integrations
hd.update(flags=flags)
hd.write_uvh5(input_file, clobber=True)
# this time_threshold will result in
# entire first integration begin flagged
# and entire final channel being flagged
# when flags are broadcasted.
time_thresh = 2. / hd.Ntimes
for blnum, bl in enumerate(flags.keys()):
outfilename = os.path.join(tmp_path, 'bl_chunk_%d.h5' % blnum)
frf.load_tophat_frfilter_and_write(datafile_list=[input_file], res_outfilename=outfilename,
tol=1e-4, baseline_list=[bl[:2]], polarizations=[bl[-1]],
cache_dir=cdir,
factorize_flags=True,
time_thresh=time_thresh, clobber=True)
# now load all of the outputs in
output_files = glob.glob(tmp_path + '/bl_chunk_*.h5')
hd = io.HERAData(output_files)
d, f, n = hd.read()
hd_original = io.HERAData(uvh5)
for bl in hd_original.bls:
assert bl in d.keys()
for bl in f:
assert np.all(f[bl][:, -1])
assert np.all(f[bl][0, :])
# test apriori flags and flag_yaml
flag_yaml = os.path.join(DATA_PATH, 'test_input/a_priori_flags_sample.yaml')
uvf = UVFlag(hd, mode='flag', copy_flags=True)
uvf.to_waterfall(keep_pol=False, method='and')
uvf.flag_array[:] = False
flagfile = os.path.join(tmp_path, 'test_flag.h5')
uvf.write(flagfile, clobber=True)
frf.load_tophat_frfilter_and_write(datafile_list=[input_file], res_outfilename=outfilename,
tol=1e-4, baseline_list=[bl[:2]], polarizations=[bl[-1]],
clobber=True, mode='dayenu',
external_flags=flagfile, overwrite_flags=True)
# test that all flags are False
hd = io.HERAData(outfilename)
d, f, n = hd.read()
for k in f:
assert np.all(~f[k])
# now do the external yaml
frf.load_tophat_frfilter_and_write(datafile_list=[input_file], res_outfilename=outfilename,
tol=1e-4, baseline_list=[bl[:2]], polarizations=[bl[-1]],
clobber=True, mode='dayenu',
external_flags=flagfile, overwrite_flags=True,
flag_yaml=flag_yaml)
# test that all flags are af yaml flags
hd = io.HERAData(outfilename)
d, f, n = hd.read()
for k in f:
assert np.all(f[k][:, 0])
assert np.all(f[k][:, 1])
assert np.all(f[k][:, 10:20])
assert np.all(f[k][:, 60])
os.remove(outfilename)
shutil.rmtree(cdir)
def test_load_tophat_frfilter_and_write_multifile(self, tmpdir):
# cover line where baseline-list is None and multiple files are provided.
uvh5s = sorted(glob.glob(DATA_PATH + '/zen.2458045.*.uvh5'))
tmp_path = tmpdir.strpath
outfilename = os.path.join(tmp_path, 'temp_output.uvh5')
frf.load_tophat_frfilter_and_write(uvh5s, filled_outfilename=outfilename, tol=1e-4, clobber=True)
hd = io.HERAData(uvh5s)
d, f, n = hd.read()
hdoutput = io.HERAData(outfilename)
doutput, foutput, nouput = hdoutput.read()
for k in doutput:
assert doutput[k].shape == d[k].shape
def test_load_tophat_frfilter_and_write(self, tmpdir):
tmp_path = tmpdir.strpath
uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
outfilename = os.path.join(tmp_path, 'temp.h5')
CLEAN_outfilename = os.path.join(tmp_path, 'temp_clean.h5')
filled_outfilename = os.path.join(tmp_path, 'temp_filled.h5')
frf.load_tophat_frfilter_and_write(uvh5, res_outfilename=outfilename, tol=1e-4, clobber=True, Nbls_per_load=1)
hd = io.HERAData(outfilename)
d, f, n = hd.read(bls=[(53, 54, 'ee')])
for bl in d:
assert not np.all(np.isclose(d[bl], 0.))
frfil = frf.FRFilter(uvh5, filetype='uvh5')
frfil.read(bls=[(53, 54, 'ee')])
frfil.tophat_frfilter(keys=[(53, 54, 'ee')], tol=1e-4, verbose=True)
np.testing.assert_almost_equal(d[(53, 54, 'ee')], frfil.clean_resid[(53, 54, 'ee')], decimal=5)
np.testing.assert_array_equal(f[(53, 54, 'ee')], frfil.flags[(53, 54, 'ee')])
# test NotImplementedError
pytest.raises(NotImplementedError, frf.load_tophat_frfilter_and_write, uvh5, res_outfilename=outfilename, tol=1e-4,
clobber=True, Nbls_per_load=1, avg_red_bllens=True, baseline_list=[(54, 54)], polarizations=['ee'])
# test loading and writing all baselines at once.
uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
outfilename = os.path.join(tmp_path, 'temp.h5')
for avg_bl in [True, False]:
frf.load_tophat_frfilter_and_write(uvh5, res_outfilename=outfilename, tol=1e-4, clobber=True,
Nbls_per_load=None, avg_red_bllens=avg_bl)
hd = io.HERAData(outfilename)
d, f, n = hd.read(bls=[(53, 54, 'ee')])
for bl in d:
assert not np.all(np.isclose(d[bl], 0.))
frfil = frf.FRFilter(uvh5, filetype='uvh5')
frfil.read(bls=[(53, 54, 'ee')])
frfil.tophat_frfilter(keys=[(53, 54, 'ee')], tol=1e-4, verbose=True)
np.testing.assert_almost_equal(d[(53, 54, 'ee')], frfil.clean_resid[(53, 54, 'ee')], decimal=5)
np.testing.assert_array_equal(f[(53, 54, 'ee')], frfil.flags[(53, 54, 'ee')])
cal = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only")
outfilename = os.path.join(tmp_path, 'temp.h5')
os.remove(outfilename)
for avg_bl in [True, False]:
frf.load_tophat_frfilter_and_write(uvh5, calfile_list=cal, tol=1e-4, res_outfilename=outfilename,
Nbls_per_load=2, clobber=True, avg_red_bllens=avg_bl)
hd = io.HERAData(outfilename)
assert 'Thisfilewasproducedbythefunction' in hd.history.replace('\n', '').replace(' ', '')
d, f, n = hd.read()
for bl in d:
if not np.all(f[bl]):
assert not np.all(np.isclose(d[bl], 0.))
np.testing.assert_array_equal(f[(53, 54, 'ee')], True)
os.remove(outfilename)
# test skip_autos
frf.load_tophat_frfilter_and_write(uvh5, calfile_list=None, tol=1e-4, res_outfilename=outfilename,
filled_outfilename=filled_outfilename, CLEAN_outfilename=CLEAN_outfilename,
Nbls_per_load=2, clobber=True, avg_red_bllens=avg_bl, skip_autos=True)
hd = io.HERAData(outfilename)
d, f, n = hd.read()
hd_original = io.HERAData(uvh5)
do, fo, no = hd_original.read()
chd = io.HERAData(CLEAN_outfilename)
cd, cf, cn = chd.read()
fhd = io.HERAData(filled_outfilename)
fd, ff, fn = fhd.read()
# test that the resids are are equal to original data.
for bl in do:
if bl[0] == bl[1]:
assert np.allclose(do[bl], d[bl]) # check that resid equals original data.
assert np.allclose(fo[bl], f[bl])
assert np.allclose(no[bl], n[bl])
assert np.allclose(cd[bl], np.zeros_like(cd[bl])) # check that all model values are zero.
assert np.allclose(fd[bl][~f[bl]], d[bl][~f[bl]]) # check that filled data equals original data.
else:
assert not np.allclose(do[bl], d[bl])
assert np.allclose(no[bl], n[bl])
# prepare an input file for broadcasting flags
input_file = os.path.join(tmp_path, 'temp_special_flags.h5')
shutil.copy(uvh5, input_file)
hd = io.HERAData(input_file)
_, flags, _ = hd.read()
ntimes_before = hd.Ntimes
nfreqs_before = hd.Nfreqs
freqs_before = hd.freqs
times_before = hd.times
for bl in flags:
flags[bl][:] = False
flags[bl][0, :hd.Nfreqs // 2] = True # first time has 50% flagged
flags[bl][-3:, -1] = True # last channel has flags for three integrations
hd.update(flags=flags)
hd.write_uvh5(input_file, clobber=True)
# this time_threshold will result in
# entire first integration begin flagged
# and entire final channel being flagged
# when flags are broadcasted.
time_thresh = 2. / hd.Ntimes
frf.load_tophat_frfilter_and_write(input_file, res_outfilename=outfilename, tol=1e-4,
factorize_flags=True, time_thresh=time_thresh, clobber=True)
hd = io.HERAData(outfilename)
d, f, n = hd.read(bls=[(53, 54, 'ee')])
for bl in f:
assert np.any(f[bl][:, :-1])
assert np.all(f[bl][0, :])
# test delay filtering and writing with factorized flags and partial i/o
frf.load_tophat_frfilter_and_write(input_file, res_outfilename=outfilename, tol=1e-4,
factorize_flags=True, time_thresh=time_thresh, clobber=True)
hd = io.HERAData(outfilename)
d, f, n = hd.read(bls=[(53, 54, 'ee')])
for bl in f:
# check that flags were broadcasted.
assert np.all(f[bl][0, :])
assert np.all(f[bl][:, -1])
assert not np.all(np.isclose(d[bl], 0.))
frf.load_tophat_frfilter_and_write(input_file, res_outfilename=outfilename, tol=1e-4, Nbls_per_load=1,
factorize_flags=True, time_thresh=time_thresh, clobber=True)
hd = io.HERAData(outfilename)
d, f, n = hd.read(bls=[(53, 54, 'ee')])
for bl in f:
# check that flags were broadcasted.
assert | np.all(f[bl][0, :]) | numpy.all |
import unittest
import matplotlib.pyplot as plt
import numpy as np
import random
from keras.models import Model
from keras.layers import Input
from keras.optimizers import Adam
from CNNTripletModel import build_network, build_model, TripletLossLayer
class TestCNNTripletModel(unittest.TestCase):
# Test the Triplet Loss Layer, that it returns proper distance
def test_Triplet_Loss_Layer_Functionality(self):
input_shape = (28,28,1)
anch = np.random.rand(1,28,28,1)
pos = np.random.rand(1,28,28,1)
neg = np.random.rand(1,28,28,1)
triplets = [anch, pos, neg]
network = build_network(input_shape, embeddingsize=10)
network_train = build_model(input_shape, network)
optimizer = Adam(lr=0.00006)
network_train.compile(loss=None, optimizer=optimizer)
value = round(network_train.predict(triplets),5)
emebed_a = network(anch)
emebed_p = network(pos)
emebed_n = network(neg)
p_dist = np.sum(np.square(emebed_a - emebed_p), axis=-1)
n_dist = np.sum(np.square(emebed_a - emebed_n), axis=-1)
expected_value = round(np.sum(np.maximum(p_dist - n_dist + 0.2, 0), axis=0),6)
self.assertTrue(abs(value-expected_value) < 0.0001)
# Test that model has proper input shape
def test_input_shape(self):
input_shape = (28,28,1)
network = build_network(input_shape, embeddingsize=10)
network_train = build_model(input_shape, network)
optimizer = Adam(lr=0.00006)
network_train.compile(loss=None, optimizer=optimizer)
value = network_train.input_shape
expected_value = [(None,28,28,1)] * 3
for i in range(3):
self.assertTrue(value[i] == expected_value[i])
# Test that model has proper output shape
def test_output_shape(self):
input_shape = (28,28,1)
network = build_network(input_shape, embeddingsize=10)
value = network.output_shape
expected_value = (None,10)
self.assertTrue(value == expected_value)
# Test that model can be trained and loss changes
def test_loss_changes(self):
random.seed(123)
input_shape = (28,28,1)
anch = np.random.rand(10,28,28,1)
pos = np.random.rand(10,28,28,1)
neg = np.random.rand(10,28,28,1)
triplets = [anch, pos, neg]
network = build_network(input_shape, embeddingsize=10)
network_train = build_model(input_shape, network)
optimizer = Adam(lr=0.00006)
network_train.compile(loss=None, optimizer=optimizer)
loss = []
for i in range(3):
loss.append(network_train.train_on_batch(triplets, None))
value = loss[0]- loss[1]
self.assertTrue(value != 0)
# Test that the loss does not go to zero
def test_no_zero_loss(self):
in_tensor = tf.placeholder(tf.float32, (None, 3))
labels = tf.placeholder(tf.int32, None, 1)
model = Model(in_tensor, labels)
sess = tf.Session()
loss = sess.run(model.loss, feed_dict={
in_tensor: | np.ones(1, 3) | numpy.ones |
import numpy as np
from munch import unmunchify
from tensorboardX import SummaryWriter
from stable_baselines3.common.monitor import Monitor
class CoordMonitor(Monitor):
REQUEST_KEYS = ['accepts', 'requests', 'num_invalid', 'num_rejects', 'no_egress_route', 'no_extension', 'skipped_on_arrival']
ACCEPTED_KEYS = ['cum_service_length', 'cum_route_hops', 'cum_datarate', 'cum_max_latency', 'cum_resd_latency']
ACCEPTED_VALS = ['mean_service_len', 'mean_hops', 'mean_datarate', 'mean_latency', 'mean_resd_latency']
def __init__(self, episode, tag, env, filename=None, allow_early_resets=True, reset_keywords=(), infor_keywords=()):
super().__init__(env, None, allow_early_resets, reset_keywords, infor_keywords)
self.writer = SummaryWriter(filename)
self.episode = episode
self.tag = tag
self.reset()
def close(self):
self.writer.flush()
self.writer.close()
super().close()
def reset(self, **kwargs):
self.c_util, self.m_util, self.d_util = [], [], []
return super().reset(**kwargs)
def step(self, action):
obs, reward, done, info = super().step(action)
for service in range(len(self.env.services)):
logs = unmunchify(self.env.info[service])
for key in self.REQUEST_KEYS:
scalar = logs[key] / self.env.num_requests
tag = f'{self.tag}/{service}/{key}'
self.writer.add_scalar(tag, scalar, self.episode)
accepts = logs['accepts'] if logs['accepts'] > 0 else np.inf
for key in self.ACCEPTED_KEYS:
scalar = logs[key] / accepts
tag = f'{self.tag}/{service}/{key}'
self.writer.add_scalar(tag, scalar, self.episode)
self.update_utilization()
return obs, reward, done, info
def update_utilization(self):
nodes = self.env.net.nodes
cutil = [1 - self.env.computing[n] / self.env.net.nodes[n]['compute'] for n in nodes]
mutil = [1 - self.env.memory[n] / self.env.net.nodes[n]['memory'] for n in nodes]
cutil = np.mean(cutil)
mutil = np.mean(mutil)
edges = self.env.net.edges
max_cap = [self.env.net.edges[e]['datarate'] for e in edges]
edges = [frozenset({*e}) for e in edges]
cap = [self.env.datarate[e] for e in edges]
dutil = 1 - np.asarray(cap) / | np.asarray(max_cap) | numpy.asarray |
""" Pyaccel tracking module
This module concentrates all tracking routines of the accelerator.
Most of them take a structure called 'positions' as an argument which
should store the initial coordinates of the particle, or the bunch of particles
to be tracked. Most of these routines generate tracked particle positions as
output, among other data. These input and ouput particle positions are stored
as native python lists or numpy arrays. Depending on the routine these position
objects may have one, two, three or four indices. These indices are used to
select particle's inices (p), coordinates(c), lattice element indices(e) or
turn number (n). For example, v = pos[p,c,e,n]. Routines in these module may
return particle positions structure missing one or more indices but the
PCEN ordering is preserved.
"""
import multiprocessing as _multiproc
import numpy as _np
import trackcpp as _trackcpp
import mathphys as _mp
from . import accelerator as _accelerator
from . import utils as _utils
from .utils import interactive as _interactive
LOST_PLANES = (None, 'x', 'y', 'z')
class TrackingException(Exception):
"""."""
@_interactive
def generate_bunch(emitx, emity, sigmae, sigmas, twi, n_part, cutoff=3):
"""
Create centered bunch with the desired equilibrium and twiss params.
Inputs:
emitx = horizontal emittance;
emity = vertical emittance;
sigmae = energy dispersion;
sigmas = bunch length;
twi = TwissObject at the desired location
n_part = number of particles
cutoff = number of sigmas to cut the distribution (in bunch size)
Output:
particles = numpy.array.shape == (6, n_part)
"""
# generate longitudinal phase space
parts = _mp.functions.generate_random_numbers(
2*n_part, dist_type='norm', cutoff=cutoff)
p_en = sigmae * parts[:n_part]
p_s = sigmas * parts[n_part:]
# generate transverse phase space
parts = _mp.functions.generate_random_numbers(
2*n_part, dist_type='exp', cutoff=cutoff*cutoff/2)
ampx = _np.sqrt(emitx * 2*parts[:n_part])
ampy = _np.sqrt(emity * 2*parts[n_part:])
parts = _mp.functions.generate_random_numbers(
2*n_part, dist_type='unif', cutoff=cutoff)
phx = _np.pi * parts[:n_part]
phy = _np.pi * parts[n_part:]
p_x = ampx*_np.sqrt(twi.betax)
p_y = ampy*_np.sqrt(twi.betay)
p_x *= _np.cos(phx)
p_y *= _np.cos(phy)
p_x += twi.etax * p_en
p_y += twi.etay * p_en
p_xp = -ampx/_np.sqrt(twi.betax)
p_yp = -ampy/_np.sqrt(twi.betay)
p_xp *= twi.alphax*_np.cos(phx) + _np.sin(phx)
p_yp *= twi.alphay*_np.cos(phy) + _np.sin(phy)
p_xp += twi.etapx * p_en
p_yp += twi.etapy * p_en
return _np.array((p_x, p_xp, p_y, p_yp, p_en, p_s))
@_interactive
def set_4d_tracking(accelerator):
accelerator.cavity_on = False
accelerator.radiation_on = False
@_interactive
def set_6d_tracking(accelerator):
accelerator.cavity_on = True
accelerator.radiation_on = True
@_interactive
def element_pass(element, particles, energy, **kwargs):
"""Track particle(s) through an element.
Accepts one or multiple particles initial positions. In the latter case,
a list of particles or numpy 2D array (with particle as second index)
should be given as input; also, outputs get an additional dimension,
with particle as second index.
Keyword arguments:
element -- 'Element' object
particles -- initial 6D particle(s) position(s)
ex.1: particles = [rx,px,ry,py,de,dl]
ex.3: particles = numpy.zeros((6, Np))
energy -- energy of the beam [eV]
harmonic_number -- harmonic number of the lattice (optional, defaul=1)
cavity_on -- cavity on state (True/False) (optional, defaul=False)
radiation_on -- radiation on state (True/False) (optional, defaul=False)
vchamber_on -- vacuum chamber on state (True/False) (optional,
defaul=False)
Returns:
part_out -- a numpy array with tracked 6D position(s) of the particle(s).
If elementpass is invoked for a single particle then 'part_out' is a
simple vector with one index that refers to the particle coordinates.
If 'particles' represents many particles, the first index of
'part_out' selects the coordinate and the second index selects the
particle.
Raises TrackingException
"""
# checks if all necessary arguments have been passed
kwargs['energy'] = energy
# creates accelerator for tracking
accelerator = _accelerator.Accelerator(**kwargs)
# checks whether single or multiple particles
p_in, _ = _process_args(accelerator, particles)
# tracks through the list of pos
ret = _trackcpp.track_elementpass_wrapper(
element.trackcpp_e, p_in, accelerator.trackcpp_acc)
if ret > 0:
raise TrackingException
return p_in.squeeze()
@_interactive
def line_pass(
accelerator, particles, indices=None, element_offset=0,
parallel=False):
"""Track particle(s) along a line.
Accepts one or multiple particles initial positions. In the latter case,
a list of particles or a numpy 2D array (with particle as second index)
should be given as input; tracked particles positions at the entrances of
elements are output variables, as well as information on whether particles
have been lost along the tracking and where they were lost.
Keyword arguments: (accelerator, particles, indices, element_offset)
accelerator -- Accelerator object
particles -- initial 6D particle(s) position(s).
Few examples
ex.1: particles = [rx,px,ry,py,de,dl]
ex.2: particles = numpy.array([rx,px,ry,py,de,dl])
ex.3: particles = numpy.zeros((6, Np))
indices -- list of indices corresponding to accelerator elements at
whose entrances, tracked particles positions are to be
stored; string:
'open': corresponds to selecting all elements.
'closed' : equal 'open' plus the position at the end of the
last element.
element_offset -- element offset (default 0) for tracking. tracking will
start at the element with index 'element_offset'
parallel -- whether to parallelize calculation or not. If an integer is
passed that many processes will be used. If True, the number
of processes will be determined automatically.
Returns: (part_out, lost_flag, lost_element, lost_plane)
part_out -- 6D position for each particle at entrance of each element.
The structure of 'part_out' depends on inputs
'particles' and 'indices'. If 'indices' is None then only
tracked positions at the end of the line are returned.
There are still two possibilities for the structure of
part_out, depending on 'particles':
(1) if 'particles' is a single particle:
ex.:particles = [rx1,px1,ry1,py1,de1,dl1]
indices = None
part_out=numpy.array([rx2,px2,ry2,py2,de2,dl2])
(2) if 'particles' is numpy matrix with several particles,
then 'part_out' will be a matrix (numpy array of
arrays) whose first index picks a coordinate rx, px,
ry, py, de or dl, in this order, and the second index
selects a particular particle.
ex.:particles.shape == (6, Np)
indices = None
part_out.shape == (6, Np)
Now, if 'indices' is not None then 'part_out' can be either
(3) a numpy matrix, when 'particles' is a single particle. The
first index of 'part_out' runs through the particle
coordinate and the second through the element index.
(4) a numpy rank-3 tensor, when 'particles' is the initial
positions of many particles. The first index is the
coordinate index, the second index is the particle index
and the third index is the element index at whose
entrances particles coordinates are returned.
lost_flag -- a general flag indicating whether there has been particle
loss.
lost_element -- list of element index where each particle was lost
If the particle survived the tracking through the line its
corresponding element in this list is set to None. When
there is only one particle defined as a python list (not as
a numpy matrix with one column) 'lost_element' returns a
single number.
lost_plane -- list of strings representing on what plane each particle
was lost while being tracked. If the particle is not lost
then its corresponding element in the list is set to None.
If it is lost in the horizontal or vertical plane it is set
to string 'x' or 'y', correspondingly. If tracking is
performed with a single particle described as a python list
then 'lost_plane' returns a single string
"""
# checks whether single or multiple particles, reformats particles
p_in, indices = _process_args(accelerator, particles, indices)
indices = indices if indices is not None else [len(accelerator), ]
if not parallel:
p_out, lost_flag, lost_element, lost_plane = _line_pass(
accelerator, p_in, indices, element_offset)
else:
slcs = _get_slices_multiprocessing(parallel, p_in.shape[1])
with _multiproc.Pool(processes=len(slcs)) as pool:
res = []
for slc in slcs:
res.append(pool.apply_async(_line_pass, (
accelerator, p_in[:, slc], indices, element_offset)))
p_out, lost_element, lost_plane = [], [], []
lost_flag = False
for re_ in res:
part_out, lflag, lelement, lplane = re_.get()
lost_flag |= lflag
p_out.append(part_out)
lost_element.extend(lelement)
lost_plane.extend(lplane)
p_out = _np.concatenate(p_out, axis=1)
# simplifies output structure in case of single particle
if len(lost_element) == 1:
lost_element = lost_element[0]
lost_plane = lost_plane[0]
return p_out, lost_flag, lost_element, lost_plane
def _line_pass(accelerator, p_in, indices, element_offset):
    """Low-level line tracking: build trackcpp args and call the wrapper.

    Returns (p_out, lost_flag, lost_element, lost_plane) with the tracked
    positions at the requested element indices and the loss information
    translated from the C++ structures.
    """
    # Assemble the static arguments consumed by the C++ tracking routine.
    args = _trackcpp.LinePassArgs()
    args.element_offset = int(element_offset)
    for index in indices:
        args.indices.push_back(int(index))

    num_particles = p_in.shape[1]
    # One 6D output column per particle per requested element index.
    p_out = _np.zeros((6, num_particles * len(indices)), dtype=float)

    lost_flag = bool(_trackcpp.track_linepass_wrapper(
        accelerator.trackcpp_acc, p_in, p_out, args))

    # Reshape to (coordinate, particle, index) and drop singleton axes.
    p_out = _np.squeeze(p_out.reshape(6, num_particles, -1))

    # Translate particle-loss information into Python structures.
    lost_element = list(args.lost_element)
    lost_plane = [LOST_PLANES[plane] for plane in args.lost_plane]
    return p_out, lost_flag, lost_element, lost_plane
@_interactive
def ring_pass(
        accelerator, particles, nr_turns=1, turn_by_turn=None,
        element_offset=0, parallel=False):
    """Track particle(s) along a ring.

    Accepts one or multiple particles as initial positions. In the latter
    case, a list of particles or a numpy 2D array (with the first index
    selecting the coordinate) should be given as input. The tracked
    positions at the end of the ring are returned, together with
    information on whether particles were lost and where.

    Keyword arguments: (accelerator, particles, nr_turns,
                        turn_by_turn, element_offset, parallel)

    accelerator    -- Accelerator object
    particles      -- initial 6D particle(s) position(s). Examples:
                        ex.1: particles = [rx, px, ry, py, de, dl]
                        ex.2: particles = numpy.array([rx, px, ry, py, de, dl])
                        ex.3: particles = numpy.zeros((6, Np))
    nr_turns       -- number of turns around the ring to track each particle.
    turn_by_turn   -- selects which turn-by-turn positions are returned.
                      If None, only the positions at the end of the ring in
                      the last turn are returned. If bool(turn_by_turn) is
                      True, positions at the beginning of every turn
                      (including the first) and at the end of the ring in
                      the last turn are returned.
    element_offset -- element index (default 0) where tracking starts.
    parallel       -- whether to parallelize the calculation. An integer
                      selects that many processes; True lets the number of
                      processes be determined automatically.

    Returns: (part_out, lost_flag, lost_turn, lost_element, lost_plane)

    part_out     -- 6D positions for each particle. The structure depends on
                    'particles' and 'turn_by_turn':
                    (1) single particle, turn_by_turn falsy: a 1D numpy
                        array [rx, px, ry, py, de, dl] at the end of
                        'nr_turns'.
                    (2) many particles, turn_by_turn falsy: a 2D numpy
                        array of shape (6, Np), first index selecting the
                        coordinate, second the particle.
                    (3) single particle, turn_by_turn truthy: a 2D numpy
                        array whose first index runs through coordinates
                        and second through the turn number.
                    (4) many particles, turn_by_turn truthy: a rank-3
                        numpy array indexed by (coordinate, particle,
                        turn number).
    lost_flag    -- a general flag indicating whether there has been
                    particle loss.
    lost_turn    -- list of turn index where each particle was lost.
    lost_element -- list of element index where each particle was lost. If
                    a particle survived, its entry is None. For a single
                    particle given as a python list, a single number is
                    returned instead of a list.
    lost_plane   -- list of strings telling on which plane each particle
                    was lost ('x' or 'y'), or None for surviving
                    particles. For a single particle given as a python
                    list, a single string is returned instead of a list.
    """
    # Normalize input: single or multiple particles become a (6, Np) array.
    p_in, *_ = _process_args(accelerator, particles, indices=None)

    if parallel:
        slices = _get_slices_multiprocessing(parallel, p_in.shape[1])
        with _multiproc.Pool(processes=len(slices)) as pool:
            # Fan the particle columns out to the worker processes.
            tasks = [
                pool.apply_async(_ring_pass, (
                    accelerator, p_in[:, slc], nr_turns, turn_by_turn,
                    element_offset))
                for slc in slices]
            parts = []
            lost_turn, lost_element, lost_plane = [], [], []
            lost_flag = False
            for task in tasks:
                part, flag, turns, elements, planes = task.get()
                lost_flag = lost_flag or flag
                parts.append(part)
                lost_turn += turns
                lost_element += elements
                lost_plane += planes
        p_out = _np.concatenate(parts, axis=1)
    else:
        p_out, lost_flag, lost_turn, lost_element, lost_plane = _ring_pass(
            accelerator, p_in, nr_turns, turn_by_turn, element_offset)

    # Simplify the output structure for the single-particle case.
    if len(lost_element) == 1:
        lost_turn, lost_element, lost_plane = (
            lost_turn[0], lost_element[0], lost_plane[0])
    return p_out, lost_flag, lost_turn, lost_element, lost_plane
def _ring_pass(accelerator, p_in, nr_turns, turn_by_turn, element_offset):
# static parameters of ringpass
args = _trackcpp.RingPassArgs()
args.nr_turns = int(nr_turns)
args.trajectory = bool(turn_by_turn)
args.element_offset = int(element_offset)
n_part = p_in.shape[1]
if bool(turn_by_turn):
p_out = _np.zeros((6, n_part*(nr_turns+1)), dtype=float)
else:
p_out = _np.zeros((6, n_part), dtype=float)
# tracking
lost_flag = bool(_trackcpp.track_ringpass_wrapper(
accelerator.trackcpp_acc, p_in, p_out, args))
p_out = p_out.reshape(6, n_part, -1)
p_out = | _np.squeeze(p_out) | numpy.squeeze |
from collections import namedtuple
import numpy as np
from cloudnetpy import utils
class Fmcw94Bin:
"""RPG Cloud Radar Level 1 data reader."""
    def __init__(self, filename):
        """Reads the header and data sections of an RPG binary file.

        Args:
            filename: Path to the RPG FMCW-94 Level 1 binary file.

        """
        # Path to the binary file; also read by the header/data readers.
        self.filename = filename
        # Byte offset where the data section starts. Initialized to 0 and
        # updated by read_rpg_header() once the header has been consumed.
        self._file_position = 0
        self.header = self.read_rpg_header()
        self.data = self.read_rpg_data()
def read_rpg_header(self):
"""Reads the header or rpg binary file."""
def append(names, dtype=np.int32, n_values=1):
"""Updates header dictionary."""
for name in names:
header[name] = np.fromfile(file, dtype, int(n_values))
header = {}
file = open(self.filename, 'rb')
append(('file_code',
'_header_length'), np.int32)
append(('_start_time',
'_stop_time'), np.uint32)
append(('program_number',))
append(('model_number',)) # 0 = single polarization, 1 = dual pol.
header['_program_name'] = self.read_string(file)
header['_customer_name'] = self.read_string(file)
append(('radar_frequency',
'antenna_separation',
'antenna_diameter',
'antenna_gain', # linear
'half_power_beam_width'), np.float32)
append(('dual_polarization',), np.int8) # 0 = single pol, 1 = LDR, 2 = STSR
append(('sample_duration',), np.float32)
append(('latitude',
'longitude'), np.float32)
append(('calibration_interval',
'_number_of_range_gates',
'_number_of_temperature_levels',
'_number_of_humidity_levels',
'_number_of_chirp_sequences'))
append(('range',), np.float32, int(header['_number_of_range_gates']))
append(('_temperature_levels',), np.float32,
int(header['_number_of_temperature_levels']))
append(('_humidity_levels',), np.float32,
int(header['_number_of_humidity_levels']))
append(('number_of_spectral_samples',
'chirp_start_indices',
'number_of_averaged_chirps'),
n_values=int(header['_number_of_chirp_sequences']))
append(('integration_time',
'range_resolution',
'nyquist_velocity'), np.float32,
int(header['_number_of_chirp_sequences']))
append(('_is_power_levelling',
'_is_spike_filter',
'_is_phase_correction',
'_is_relative_power_correction'), np.int8)
append(('FFT_window',), np.int8) # 0=square, 1=parzen, 2=blackman, 3=welch, 4=slepian2, 5=slepian3
append(('input_voltage_range',))
append(('noise_threshold',), np.float32)
# Fix for Level 1 version 4 files:
if int(header['file_code']) >= 889348:
_ = np.fromfile(file, np.int32, 25)
_ = np.fromfile(file, np.uint32, 10000)
self._file_position = file.tell()
file.close()
return header
@staticmethod
def read_string(file_id):
"""Read characters from binary data until whitespace."""
str_out = ''
while True:
c = | np.fromfile(file_id, np.int8, 1) | numpy.fromfile |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should look them up through the dictionary space_groups
    rather than construct their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each a tuple (rot, tn, td) of a rotation
                                matrix and the numerator/denominator
                                arrays of the translation vector, all in
                                fractional coordinates
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Miller indices transform with the transposed rotation matrices.
        self.transposed_rotations = N.array(
            [N.transpose(rot) for rot, _num, _den in transformations])
        # Per-operation, per-axis phase factors exp(-2*pi*i*tn/td) for the
        # fractional translations.
        exponents = [(-2j * N.pi * num) / den
                     for _rot, num, den in transformations]
        self.phase_factors = N.exp(N.array(exponents))

    def __repr__(self):
        return "SpaceGroup({}, {!r})".format(self.number, self.symbol)

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :return: a tuple (miller_indices, phase_factor) of two arrays of
                 length equal to the number of space group
                 transformations. miller_indices holds the Miller indices
                 of every reflection equivalent by symmetry to hkl
                 (including hkl itself as the first element);
                 phase_factor holds the factors that must be applied to
                 the structure factor of hkl to obtain the structure
                 factor of the corresponding equivalent reflection.
        :rtype: tuple
        """
        equivalents = N.dot(self.transposed_rotations, hkl)
        phases = N.multiply.reduce(self.phase_factors ** hkl, -1)
        return equivalents, phases
space_groups = {}


def _sg(number, symbol, operations):
    """Builds a SpaceGroup from compact (rotation, numerator, denominator)
    triples and registers it in space_groups under both number and symbol."""
    transformations = []
    for rot, trans_num, trans_den in operations:
        rot = N.array(rot)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(trans_num), N.array(trans_den)))
    sg = SpaceGroup(number, symbol, transformations)
    space_groups[number] = sg
    space_groups[symbol] = sg


# Row-major 3x3 rotation matrices occurring in the triclinic, monoclinic
# and orthorhombic groups below.
_R1 = (1, 0, 0, 0, 1, 0, 0, 0, 1)       # identity
_R2X = (1, 0, 0, 0, -1, 0, 0, 0, -1)    # two-fold rotation about x
_R2Y = (-1, 0, 0, 0, 1, 0, 0, 0, -1)    # two-fold rotation about y
_R2Z = (-1, 0, 0, 0, -1, 0, 0, 0, 1)    # two-fold rotation about z
_RINV = (-1, 0, 0, 0, -1, 0, 0, 0, -1)  # inversion
_RMY = (1, 0, 0, 0, -1, 0, 0, 0, 1)     # mirror with normal along y

# Each operation below is (rotation, translation numerator, translation
# denominator); the fractional translation is numerator/denominator.
_sg(1, 'P 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
])
_sg(2, 'P -1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
])
_sg(3, 'P 1 2 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
])
_sg(4, 'P 1 21 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 1, 0), (1, 2, 1)),
])
_sg(5, 'C 1 2 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 0), (2, 2, 1)),
])
_sg(6, 'P 1 m 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, 0), (1, 1, 1)),
])
_sg(7, 'P 1 c 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, 1), (1, 1, 2)),
])
_sg(8, 'C 1 m 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, 0), (1, 1, 1)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_RMY, (1, 1, 0), (2, 2, 1)),
])
_sg(9, 'C 1 c 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, 1), (1, 1, 2)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_RMY, (1, 1, 1), (2, 2, 2)),
])
_sg(10, 'P 1 2/m 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, 0), (1, 1, 1)),
])
_sg(11, 'P 1 21/m 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 1, 0), (1, 2, 1)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, -1, 0), (1, 2, 1)),
])
_sg(12, 'C 1 2/m 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, 0), (1, 1, 1)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 0), (2, 2, 1)),
    (_RINV, (1, 1, 0), (2, 2, 1)),
    (_RMY, (1, 1, 0), (2, 2, 1)),
])
_sg(13, 'P 1 2/c 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 1), (1, 1, 2)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, -1), (1, 1, 2)),
])
_sg(14, 'P 1 21/c 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 1, 1), (1, 2, 2)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, -1, -1), (1, 2, 2)),
])
_sg(15, 'C 1 2/c 1', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 1), (1, 1, 2)),
    (_RINV, (0, 0, 0), (1, 1, 1)),
    (_RMY, (0, 0, -1), (1, 1, 2)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 1), (2, 2, 2)),
    (_RINV, (1, 1, 0), (2, 2, 1)),
    (_RMY, (1, 1, -1), (2, 2, 2)),
])
_sg(16, 'P 2 2 2', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_R2Z, (0, 0, 0), (1, 1, 1)),
])
_sg(17, 'P 2 2 21', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 1), (1, 1, 2)),
    (_R2Z, (0, 0, 1), (1, 1, 2)),
])
_sg(18, 'P 21 21 2', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 0), (2, 2, 1)),
    (_R2Z, (0, 0, 0), (1, 1, 1)),
])
_sg(19, 'P 21 21 21', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (0, 1, 1), (1, 2, 2)),
    (_R2Z, (1, 0, 1), (2, 1, 2)),
])
_sg(20, 'C 2 2 21', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 1), (1, 1, 2)),
    (_R2Z, (0, 0, 1), (1, 1, 2)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_R2X, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 1), (2, 2, 2)),
    (_R2Z, (1, 1, 1), (2, 2, 2)),
])
_sg(21, 'C 2 2 2', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_R2Z, (0, 0, 0), (1, 1, 1)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_R2X, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 0), (2, 2, 1)),
    (_R2Z, (1, 1, 0), (2, 2, 1)),
])
_sg(22, 'F 2 2 2', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_R2Z, (0, 0, 0), (1, 1, 1)),
    (_R1, (0, 1, 1), (1, 2, 2)),
    (_R2X, (0, 1, 1), (1, 2, 2)),
    (_R2Y, (0, 1, 1), (1, 2, 2)),
    (_R2Z, (0, 1, 1), (1, 2, 2)),
    (_R1, (1, 0, 1), (2, 1, 2)),
    (_R2X, (1, 0, 1), (2, 1, 2)),
    (_R2Y, (1, 0, 1), (2, 1, 2)),
    (_R2Z, (1, 0, 1), (2, 1, 2)),
    (_R1, (1, 1, 0), (2, 2, 1)),
    (_R2X, (1, 1, 0), (2, 2, 1)),
    (_R2Y, (1, 1, 0), (2, 2, 1)),
    (_R2Z, (1, 1, 0), (2, 2, 1)),
])
_sg(23, 'I 2 2 2', [
    (_R1, (0, 0, 0), (1, 1, 1)),
    (_R2X, (0, 0, 0), (1, 1, 1)),
    (_R2Y, (0, 0, 0), (1, 1, 1)),
    (_R2Z, (0, 0, 0), (1, 1, 1)),
    (_R1, (1, 1, 1), (2, 2, 2)),
    (_R2X, (1, 1, 1), (2, 2, 2)),
    (_R2Y, (1, 1, 1), (2, 2, 2)),
    (_R2Z, (1, 1, 1), (2, 2, 2)),
])
# Space group 24 (I 21 21 21): 8 operations as (rotation, translation
# numerator, translation denominator) triples.  Vectors like
# [1,1,1]/[2,2,1] are the generator's unreduced num/den form.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
# Space group 25 (P m m 2): 4 operations, all with zero translation.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
# Space group 26 (P m c 21): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
# Space group 27 (P c c 2): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
# Space group 28 (P m a 2): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
# Space group 29 (P c a 21): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
# Space group 30 (P n c 2): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
# Space group 31 (P m n 21): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
# Space group 32 (P b a 2): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
# Space group 33 (P n a 21): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
# Space group 34 (P n n 2): 4 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
# Space group 35 (C m m 2): 8 operations — the 4 point operations at zero
# translation, then the same 4 shifted by [1,1,0]/[2,2,1] (C centering).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
# Space group 36 (C m c 21): 8 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
# Space group 37 (C c c 2): 8 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
# Space group 38 (A m m 2): 8 operations — the 4 point operations at zero
# translation, then the same 4 shifted by [0,1,1]/[1,2,2] (A centering).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
# Space group 39 (A b m 2): 8 operations as (rotation, translation
# numerator, translation denominator) triples.  The [0,1,1]/[1,1,2]
# entries are the generator's unreduced num/den form (i.e. (0, 1, 1/2)).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
# Space group 40 (A m a 2): 8 operations as (rotation, translation
# numerator, translation denominator) triples.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
# Space group 41 (A b a 2): 8 operations as (rotation, translation
# numerator, translation denominator) triples.  [1,1,1]/[2,1,2] is the
# generator's unreduced num/den form (i.e. (1/2, 1, 1/2)).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
# Space group 42 (F m m 2): 16 operations — the 4 point operations
# repeated for each of the four F-lattice translations (0, A, B and C
# centerings), kept in the original order.
transformations = []
_point_rots = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
]
for _num, _den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for _r in _point_rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
# Space group 43 (F d d 2): 16 operations as (rotation, translation
# numerator, translation denominator) triples, listed in the original
# order (four blocks of four, one per F-lattice translation).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,3,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,3,3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [3,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [3,1,3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [3,3,1], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [3,3,1], [4,4,4]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
# Space group 44 (I m m 2): 8 operations — the 4 point operations at zero
# translation, then the same 4 shifted by [1,1,1]/[2,2,2] (I centering).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
# Space group 45 (I b a 2): 8 operations as (rotation, translation
# numerator, translation denominator) triples.  [1,1,1]/[2,2,1] is the
# generator's unreduced num/den form (i.e. (1/2, 1/2, 1)).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
# Space group 46 (I m a 2): 8 operations as (rotation, translation
# numerator, translation denominator) triples.  [1,1,1]/[1,2,2] is the
# generator's unreduced num/den form (i.e. (1, 1/2, 1/2)).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [1,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
# Space group 47 (P m m m): 8 operations, all with zero translation —
# only the rotation part differs between entries.
transformations = []
for _rot in [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
# Space group 48 (P n n n, origin choice 2): 8 operations as (rotation,
# translation numerator, translation denominator) triples.  Negative
# numerators (e.g. [0,-1,-1]) are kept exactly as the generator emitted
# them; they are equivalent translations modulo the lattice.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
# Space group 49 (P c c m).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
# Space group 50 (P b a n :2).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
# Space group 51 (P m m a).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, 0], [2, 1, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
# Space group 52 (P n n a).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, 0], [2, 1, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
# Space group 53 (P m n a).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
# Space group 54 (P c c a).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, 0], [2, 1, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
# Space group 55 (P b a m).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
# Space group 56 (P c c n).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
# Space group 57 (P b c m).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
# Space group 58 (P n n m).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
# Space group 59 (P m m n :2).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
# Space group 60 (P b c n).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, -1], [2, 2, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
# Space group 61 (P b c a).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
# Space group 62 (P n m a).  Each entry below is
# (rotation matrix in row-major 3x3 order, translation numerators,
# translation denominators); the translation is num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
# Space group 63 (C m c m), 16 operations (8 primitive + 8 C-centred).
# Each entry below is (rotation matrix in row-major 3x3 order,
# translation numerators, translation denominators); the translation is
# num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, -1], [2, 2, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
# Space group 64 (C m c a), 16 operations (8 primitive + 8 C-centred).
# Each entry below is (rotation matrix in row-major 3x3 order,
# translation numerators, translation denominators); the translation is
# num/den componentwise.  Translation components >= 1 are equivalent to
# themselves mod 1 (generator output kept verbatim).
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, -1], [1, 2, 2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
# Space group 65 (C m m m), 16 operations (8 primitive + 8 C-centred).
# Each entry below is (rotation matrix in row-major 3x3 order,
# translation numerators, translation denominators); the translation is
# num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
# Space group 66 (C c c m), 16 operations (8 primitive + 8 C-centred).
# Each entry below is (rotation matrix in row-major 3x3 order,
# translation numerators, translation denominators); the translation is
# num/den componentwise.
transformations = []
for rot_vals, num_vals, den_vals in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    trans_num = N.array(num_vals)
    trans_den = N.array(den_vals)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
# Space group 67 ('C m m a'): each entry is (flattened 3x3 rotation matrix,
# translation numerators, translation denominators); component i of the
# translation is num[i]/den[i].
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,0],  [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],  [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],  [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,0],  [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],  [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],  [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,1,0],  [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,1,0],  [1,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
# Space group 68 ('C c c a :2'): (flattened rotation, translation numerators,
# translation denominators) triples, appended in the original order.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,0],   [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,0],  [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],   [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],   [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],   [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,1,-1],  [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,1,-1],  [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,1,0],   [1,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
# Space group 69 ('F m m m'): the same eight rotation matrices are repeated
# with four translation offsets (num[i]/den[i] per component), in the
# original per-offset grouping and rotation order.
_rotations = [
    [1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0, 0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,1],
    [1,0,0, 0,1,0, 0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([0,1,1], [1,2,2]),
                   ([1,0,1], [2,1,2]), ([1,1,0], [2,2,1])]:
    for _r in _rotations:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
# Space group 70 ('F d d d :2'): 32 (flattened rotation, translation
# numerators, translation denominators) triples, appended in the original
# order.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,1],    [1,4,4]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,1],    [4,1,4]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [4,4,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,-1],  [1,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,-1],  [4,1,4]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [4,4,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [0,1,1],    [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,3,3],    [1,4,4]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,3],    [4,2,4]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,3,1],    [4,4,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,1,1],    [1,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,1,1],   [4,2,4]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,1,1],   [4,4,2]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,0,1],    [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,3],    [2,4,4]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [3,0,3],    [4,1,4]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [3,1,1],    [4,4,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,-1,1],   [2,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,0,1],    [4,1,4]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,-1,1],   [4,4,2]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],    [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,3,1],    [2,4,4]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [3,1,1],    [4,2,4]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [3,3,0],    [4,4,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,-1],   [2,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,1,-1],   [4,2,4]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,1,0],    [4,4,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
# Space group 71 ('I m m m'): eight rotation matrices, each appearing once
# with a zero translation and once with translation (1/2, 1/2, 1/2).
_rotations = [
    [1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0, 0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,1],
    [1,0,0, 0,1,0, 0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rotations:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
# Space group 72 ('I b a m'): (flattened rotation, translation numerators,
# translation denominators) triples, appended in the original order.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1],  [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,1],  [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,1,1],  [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
# Space group 73 ('I b c a'): (flattened rotation, translation numerators,
# translation denominators) triples, appended in the original order.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,1,0],  [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,-1,0], [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],  [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1],  [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,1],  [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,1,1],  [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,0,1],  [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
# Space group 74 ('I m m a'): (flattened rotation, translation numerators,
# translation denominators) triples, appended in the original order.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,0],  [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,1,0],  [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,0], [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,-1,0], [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],  [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1],  [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,1],  [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,0,1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,0,1],  [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
# Space group 75 ('P 4'): four rotation matrices, all with zero translation.
for _r in [
    [1,0,0, 0,1,0, 0,0,1],
    [0,-1,0, 1,0,0, 0,0,1],
    [0,1,0, -1,0,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,1],
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
# Space group 76 ('P 41'): (flattened rotation, translation numerators,
# translation denominators) triples.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],  [0,0,1], [1,1,4]),
    ([0,1,0, -1,0,0, 0,0,1],  [0,0,3], [1,1,4]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
# Space group 77 ('P 42'): (flattened rotation, translation numerators,
# translation denominators) triples.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
# Space group 78 ('P 43'): (flattened rotation, translation numerators,
# translation denominators) triples.
for _r, _num, _den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],  [0,0,3], [1,1,4]),
    ([0,1,0, -1,0,0, 0,0,1],  [0,0,1], [1,1,4]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
# Space group 79 ('I 4'): four rotation matrices, each appearing once with a
# zero translation and once with translation (1/2, 1/2, 1/2).
_rotations = [
    [1,0,0, 0,1,0, 0,0,1],
    [0,-1,0, 1,0,0, 0,0,1],
    [0,1,0, -1,0,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rotations:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
# Space group 80 ('I 41'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
# Space group 81 ('P -4'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
# Space group 82 ('I -4'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
# Space group 83 ('P 4/m'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
# Space group 84 ('P 42/m'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
# Space group 85 ('P 4/n :2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
# Space group 86 ('P 42/n :2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
# Space group 87 ('I 4/m'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
# Space group 88 ('I 41/a :2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,5], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
# Space group 89 ('P 4 2 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
# Space group 90 ('P 4 21 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
# Space group 91 ('P 41 2 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,3], [1,1,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,4]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
# Space group 92 ('P 41 21 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [2,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
# Space group 93 ('P 42 2 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
# Space group 94 ('P 42 21 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
# Space group 95 ('P 43 2 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,3], [1,1,4]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
# Space group 96 ('P 43 21 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,3], [2,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [2,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
# Space group 97 ('I 4 2 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
# Space group 98 ('I 41 2 2'): symmetry operations as
# (rotation, translation numerators, translation denominators) triples.
transformations = []
for flat_rot, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)  # reshape the 9 flat entries into a 3x3 matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
# Space group 99 (P 4 m m).
# Each symmetry operation is stored as a triple (rotation matrix,
# translation numerator, translation denominator): the translation
# vector is the element-wise fraction num/den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
# Space group 100 (P 4 b m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
# Space group 101 (P 42 c m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
# Space group 102 (P 42 n m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
# Space group 103 (P 4 c c): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
# Space group 104 (P 4 n c): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
# Space group 105 (P 42 m c): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
# Space group 106 (P 42 b c): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
# Space group 107 (I 4 m m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.  The second batch
# of eight operations repeats the first with the body-centering shift
# (1/2, 1/2, 1/2) added.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
# Space group 108 (I 4 c m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.  Fractions are
# reproduced exactly as generated, including unreduced values such as
# (1/2, 1/2, 1) — presumably normalized mod 1 downstream.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
# Space group 109 (I 41 m d): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.  Fractions are
# reproduced exactly as generated, including unreduced values such as
# (1, 1/2, 5/4) — presumably normalized mod 1 downstream.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
# Space group 110 (I 41 c d): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.  Fractions are
# reproduced exactly as generated, unreduced values included.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,1], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1], [1,0,1], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,3], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,3], [1,2,4]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
# Space group 111 (P -4 2 m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise (all zero here).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
# Space group 112 (P -4 2 c): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
# Space group 113 (P -4 21 m): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# Space group 114 (P -4 21 c): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# Space group 115 (P -4 m 2): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise (all zero here).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# Space group 116 (P -4 c 2): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# Space group 117 (P -4 b 2): (rotation, translation num, translation den)
# triples; translation vector is num/den element-wise.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# Space group 118 (P -4 n 2): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# Space group 119 (I -4 m 2): body-centred; the second half of the list
# repeats the point operations shifted by the (1/2, 1/2, 1/2) centring vector.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# Space group 120 (I -4 c 2): body-centred; the second half of the list
# repeats the point operations with the centring translation applied.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# Space group 121 (I -4 2 m): body-centred; the second half of the list
# repeats the point operations shifted by (1/2, 1/2, 1/2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# Space group 122 (I -4 2 d): body-centred; the second half of the list
# repeats the point operations with the centring translation added.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
# Space group 123 (P 4/m m m): symmorphic group — all sixteen operations
# are pure rotations/reflections with zero translation.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
# Space group 124 (P 4/m c c): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
# Space group 125 (P 4/n b m, origin choice 2): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
# Space group 126 (P 4/n n c, origin choice 2): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
# Space group 127 (P 4/m b m): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
# Space group 128 (P 4/m n c): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
# Space group 129 (P 4/n m m, origin choice 2): symmetry operations as
# (rotation, translation-numerator, translation-denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
# Space group 130 (P 4/n c c, origin choice 2).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,1],    [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,0,0],   [2,1,1]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,-1,0],   [1,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0, 1,0,0, 0,0,1],    [0,0,-1],   [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
# Register under both the International Tables number and the symbol.
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
# Space group 131 (P 42/m m c).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,0,1],  [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,0,-1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0, 1,0,0, 0,0,1],    [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
# Register under both the International Tables number and the symbol.
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
# Space group 132 (P 42/m c m).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,0,1],  [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,0,-1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,1],    [0,0,0],  [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
# Register under both the International Tables number and the symbol.
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
# Space group 133 (P 42/n b c, origin choice 2).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [0,0,1],    [1,1,2]),
    ([0,-1,0, -1,0,0, 0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [0,0,-1],   [1,1,2]),
    ([0,1,0, 1,0,0, 0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
# Register under both the International Tables number and the symbol.
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
# Space group 134 (P 42/n n m, origin choice 2).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,0,1],   [2,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,1,1],   [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [0,0,0],   [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,0,-1], [2,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,-1,-1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [0,0,0],   [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,1],    [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
# Register under both the International Tables number and the symbol.
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
# Space group 135 (P 42/m b c).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [0,0,1],    [1,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,0,1],    [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0, -1,0,0, 0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [0,0,-1],   [1,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,0,-1],   [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,-1,0],  [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,-1,0],  [2,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0, 1,0,0, 0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
# Register under both the International Tables number and the symbol.
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
# Space group 136 (P 42/m n m).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,1,1],    [2,2,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [1,1,1],    [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,-1,-1], [2,2,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [-1,-1,-1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0, 1,0,0, 0,0,1],    [0,0,0],    [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
# Register under both the International Tables number and the symbol.
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
# Space group 137 (P 42/n m c, origin choice 2).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,1],    [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0, 1,0,0, 0,0,1],    [0,0,-1],   [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
# Register under both the International Tables number and the symbol.
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Space group 138 (P 42/n c m, origin choice 2).
# Each entry is (rotation, translation numerator, translation denominator):
# the rotation is listed row-major and reshaped to 3x3, and the translation
# is the fractional vector num/den applied after the rotation.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,0,1],   [2,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,1,1],   [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0, 1,0,0, 0,0,-1],   [1,1,0],   [2,2,1]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,0,-1], [2,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,-1,-1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0, -1,0,0, 0,0,1],  [-1,-1,0], [2,2,1]),
    ([0,1,0, 1,0,0, 0,0,1],    [0,0,0],   [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
# Register under both the International Tables number and the symbol.
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Space group 139 (I 4/m m m): body-centred tetragonal.  The same 16
# point-group rotations are combined first with the zero translation and
# then with the body-centring translation (1/2, 1/2, 1/2), giving 32
# (rotation, translation numerator, translation denominator) operations
# in the original enumeration order.
transformations = []
for num, den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for rot_elements in [
        [1,0,0, 0,1,0, 0,0,1],
        [0,-1,0, 1,0,0, 0,0,1],
        [0,1,0, -1,0,0, 0,0,1],
        [1,0,0, 0,-1,0, 0,0,-1],
        [-1,0,0, 0,1,0, 0,0,-1],
        [-1,0,0, 0,-1,0, 0,0,1],
        [0,1,0, 1,0,0, 0,0,-1],
        [0,-1,0, -1,0,0, 0,0,-1],
        [-1,0,0, 0,-1,0, 0,0,-1],
        [0,1,0, -1,0,0, 0,0,-1],
        [0,-1,0, 1,0,0, 0,0,-1],
        [-1,0,0, 0,1,0, 0,0,1],
        [1,0,0, 0,-1,0, 0,0,1],
        [1,0,0, 0,1,0, 0,0,-1],
        [0,-1,0, -1,0,0, 0,0,1],
        [0,1,0, 1,0,0, 0,0,1],
    ]:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
# Register under both the International Tables number and the symbol.
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
# Symmetry operations for space group 140 ('I 4/m c m'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    # Body-centering partners (translation 1/2,1/2,1/2 added).
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
# Symmetry operations for space group 141 ('I 41/a m d :2'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 3, 1], [4, 4, 4]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [1, 1, 3], [4, 4, 4]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 1, 0], [1, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 3, 1], [4, 4, 4]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 3], [4, 4, 4]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, -3, -1], [4, 4, 4]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [-1, -1, -3], [4, 4, 4]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, -1, 0], [1, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -3, -1], [4, 4, 4]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, -3], [4, 4, 4]),
    # Body-centering partners (translation 1/2,1/2,1/2 added).
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [3, 5, 3], [4, 4, 4]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [3, 3, 5], [4, 4, 4]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 1], [2, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [3, 5, 3], [4, 4, 4]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [3, 3, 5], [4, 4, 4]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [1, -1, 1], [4, 4, 4]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [1, 1, -1], [4, 4, 4]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [1, -1, 1], [4, 4, 4]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [1, 1, -1], [4, 4, 4]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
# Symmetry operations for space group 142 ('I 41/a c d :2'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 3, 1], [4, 4, 4]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [1, 1, 3], [4, 4, 4]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 1, 0], [1, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 3, 3], [4, 4, 4]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [4, 4, 4]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, -3, -1], [4, 4, 4]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [-1, -1, -3], [4, 4, 4]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, -1, 0], [1, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -3, -3], [4, 4, 4]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, -1], [4, 4, 4]),
    # Body-centering partners (translation 1/2,1/2,1/2 added).
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [3, 5, 3], [4, 4, 4]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [3, 3, 5], [4, 4, 4]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 1], [2, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [3, 5, 5], [4, 4, 4]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [3, 3, 3], [4, 4, 4]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [1, -1, 1], [4, 4, 4]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [1, 1, -1], [4, 4, 4]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 1, 1], [1, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [1, -1, -1], [4, 4, 4]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [1, 1, 1], [4, 4, 4]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
# Symmetry operations for space group 143 ('P 3'): the three rotations of a
# trigonal axis, no translations.
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
# Symmetry operations for space group 144 ('P 31'): threefold screw axis with
# translations 0, 1/3 and 2/3 along c.
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 3]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 2], [1, 1, 3]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
# Symmetry operations for space group 145 ('P 32'): threefold screw axis with
# translations 0, 2/3 and 1/3 along c.
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 2], [1, 1, 3]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 3]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
# Symmetry operations for space group 146 ('R 3 :H'): the three trigonal
# rotations combined with each rhombohedral centering translation.
_rots = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
]
_centerings = [
    ([0, 0, 0], [1, 1, 1]),
    ([1, 2, 2], [3, 3, 3]),
    ([2, 1, 1], [3, 3, 3]),
]
for _n, _d in _centerings:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
# Symmetry operations for space group 147 ('P -3'): trigonal rotations plus
# their inversion partners, all with zero translation.
_rots = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
    [-1, 0, 0, 0, -1, 0, 0, 0, -1],
    [0, 1, 0, -1, 1, 0, 0, 0, -1],
    [1, -1, 0, 1, 0, 0, 0, 0, -1],
]
for _r in _rots:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
# Symmetry operations for space group 148 ('R -3 :H'): the six point-group
# operations of -3 combined with each rhombohedral centering translation.
_rots = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
    [-1, 0, 0, 0, -1, 0, 0, 0, -1],
    [0, 1, 0, -1, 1, 0, 0, 0, -1],
    [1, -1, 0, 1, 0, 0, 0, 0, -1],
]
_centerings = [
    ([0, 0, 0], [1, 1, 1]),
    ([1, 2, 2], [3, 3, 3]),
    ([2, 1, 1], [3, 3, 3]),
]
for _n, _d in _centerings:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
# Symmetry operations for space group 149 ('P 3 1 2'): trigonal rotations and
# twofold axes, all with zero translation.
_rots = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
    [0, -1, 0, -1, 0, 0, 0, 0, -1],
    [-1, 1, 0, 0, 1, 0, 0, 0, -1],
    [1, 0, 0, 1, -1, 0, 0, 0, -1],
]
for _r in _rots:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
# Symmetry operations for space group 150 ('P 3 2 1'): trigonal rotations and
# twofold axes, all with zero translation.
_rots = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
    [1, -1, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, -1, 1, 0, 0, 0, -1],
    [0, 1, 0, 1, 0, 0, 0, 0, -1],
]
for _r in _rots:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
# Symmetry operations for space group 151 ('P 31 1 2'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 3]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 2], [1, 1, 3]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 2], [1, 1, 3]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 3]),
    ([1, 0, 0, 1, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
# Symmetry operations for space group 152 ('P 31 2 1'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 3]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 2], [1, 1, 3]),
    ([1, -1, 0, 0, -1, 0, 0, 0, -1], [0, 0, 2], [1, 1, 3]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 3]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
# Symmetry operations for space group 153 ('P 32 1 2'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 2], [1, 1, 3]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 3]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 3]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, -1], [0, 0, 2], [1, 1, 3]),
    ([1, 0, 0, 1, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
# Symmetry operations for space group 154 ('P 32 2 1'), one entry per
# operation: (rotation matrix entries, translation numerators, denominators).
_ops = [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 2], [1, 1, 3]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 3]),
    ([1, -1, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 3]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, -1], [0, 0, 2], [1, 1, 3]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
]
for _r, _n, _d in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
# Symmetry operations for space group 155 ('R 3 2 :H'): the six point-group
# operations of 3 2 combined with each rhombohedral centering translation.
_rots = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
    [1, -1, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, -1, 1, 0, 0, 0, -1],
    [0, 1, 0, 1, 0, 0, 0, 0, -1],
]
_centerings = [
    ([0, 0, 0], [1, 1, 1]),
    ([1, 2, 2], [3, 3, 3]),
    ([2, 1, 1], [3, 3, 3]),
]
for _n, _d in _centerings:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Space group 156 (P 3 m 1): six symmetry operations, all with zero translation.
transformations = []
for elems in [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
    [-1,1,0, 0,1,0,  0,0,1],
    [1,0,0,  1,-1,0, 0,0,1],
    [0,-1,0, -1,0,0, 0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Space group 157 (P 3 1 m): six symmetry operations, all with zero translation.
transformations = []
for elems in [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
    [0,1,0,  1,0,0,  0,0,1],
    [1,-1,0, 0,-1,0, 0,0,1],
    [-1,0,0, -1,1,0, 0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Space group 158 (P 3 c 1): threefold rotations with zero translation,
# c-glide mirrors with translation (0, 0, 1/2).
transformations = []
for elems, num, den in [
    ([1,0,0,  0,1,0,  0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0, 1,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0, -1,0,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0, 0,1,0,  0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,  1,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0, -1,0,0, 0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Space group 159 (P 3 1 c): threefold rotations with zero translation,
# c-glide mirrors with translation (0, 0, 1/2).
transformations = []
for elems, num, den in [
    ([1,0,0,  0,1,0,  0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0, 1,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0, -1,0,0, 0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,  1,0,0,  0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, -1,1,0, 0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Space group 160 (R 3 m :H): six point-group operations of 3m, replicated
# over the three rhombohedral centring translations of the hexagonal setting.
transformations = []
_rotations_160 = [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
    [-1,1,0, 0,1,0,  0,0,1],
    [1,0,0,  1,-1,0, 0,0,1],
    [0,-1,0, -1,0,0, 0,0,1],
]
for num, den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for elems in _rotations_160:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161 (R 3 c :H): for each rhombohedral centring, three pure
# threefold rotations (centring translation only) followed by three c-glide
# mirrors (centring translation plus (0, 0, 1/2)).
transformations = []
_threefold_161 = [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
]
_glides_161 = [
    [-1,1,0, 0,1,0,  0,0,1],
    [1,0,0,  1,-1,0, 0,0,1],
    [0,-1,0, -1,0,0, 0,0,1],
]
for (pnum, pden), (gnum, gden) in [
    (([0,0,0], [1,1,1]), ([0,0,1], [1,1,2])),
    (([1,2,2], [3,3,3]), ([1,2,7], [3,3,6])),
    (([2,1,1], [3,3,3]), ([2,1,5], [3,3,6])),
]:
    for elems in _threefold_161:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(pnum), N.array(pden)))
    for elems in _glides_161:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(gnum), N.array(gden)))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162 (P -3 1 m): twelve symmetry operations, all with zero
# translation.
transformations = []
for elems in [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
    [0,-1,0, -1,0,0, 0,0,-1],
    [-1,1,0, 0,1,0,  0,0,-1],
    [1,0,0,  1,-1,0, 0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [0,1,0,  -1,1,0, 0,0,-1],
    [1,-1,0, 1,0,0,  0,0,-1],
    [0,1,0,  1,0,0,  0,0,1],
    [1,-1,0, 0,-1,0, 0,0,1],
    [-1,0,0, -1,1,0, 0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163 (P -3 1 c): twelve operations; the twofold axes and the
# c-glide-related mirrors carry a (0, 0, +-1/2) translation component.
transformations = []
for elems, num, den in [
    ([1,0,0,  0,1,0,  0,0,1],  [0,0,0],  [1,1,1]),
    ([0,-1,0, 1,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0, -1,0,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([0,-1,0, -1,0,0, 0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,1,0, 0,1,0,  0,0,-1], [0,0,1],  [1,1,2]),
    ([1,0,0,  1,-1,0, 0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,  -1,1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([1,-1,0, 1,0,0,  0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,  1,0,0,  0,0,1],  [0,0,-1], [1,1,2]),
    ([1,-1,0, 0,-1,0, 0,0,1],  [0,0,-1], [1,1,2]),
    ([-1,0,0, -1,1,0, 0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164 (P -3 m 1): twelve symmetry operations, all with zero
# translation.
transformations = []
for elems in [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
    [1,-1,0, 0,-1,0, 0,0,-1],
    [-1,0,0, -1,1,0, 0,0,-1],
    [0,1,0,  1,0,0,  0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [0,1,0,  -1,1,0, 0,0,-1],
    [1,-1,0, 1,0,0,  0,0,-1],
    [-1,1,0, 0,1,0,  0,0,1],
    [1,0,0,  1,-1,0, 0,0,1],
    [0,-1,0, -1,0,0, 0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165 (P -3 c 1): twelve operations; the twofold axes and the
# glide mirrors carry a (0, 0, +-1/2) translation component.
transformations = []
for elems, num, den in [
    ([1,0,0,  0,1,0,  0,0,1],  [0,0,0],  [1,1,1]),
    ([0,-1,0, 1,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0, -1,0,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0, 0,-1,0, 0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0, -1,1,0, 0,0,-1], [0,0,1],  [1,1,2]),
    ([0,1,0,  1,0,0,  0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,  -1,1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([1,-1,0, 1,0,0,  0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0, 0,1,0,  0,0,1],  [0,0,-1], [1,1,2]),
    ([1,0,0,  1,-1,0, 0,0,1],  [0,0,-1], [1,1,2]),
    ([0,-1,0, -1,0,0, 0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166 (R -3 m :H): the twelve point-group operations of -3m,
# replicated over the three rhombohedral centring translations of the
# hexagonal setting.
transformations = []
_rotations_166 = [
    [1,0,0,  0,1,0,  0,0,1],
    [0,-1,0, 1,-1,0, 0,0,1],
    [-1,1,0, -1,0,0, 0,0,1],
    [1,-1,0, 0,-1,0, 0,0,-1],
    [-1,0,0, -1,1,0, 0,0,-1],
    [0,1,0,  1,0,0,  0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [0,1,0,  -1,1,0, 0,0,-1],
    [1,-1,0, 1,0,0,  0,0,-1],
    [-1,1,0, 0,1,0,  0,0,1],
    [1,0,0,  1,-1,0, 0,0,1],
    [0,-1,0, -1,0,0, 0,0,1],
]
for num, den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for elems in _rotations_166:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167 (R -3 c :H): four triples of rotations (threefold axes,
# twofold axes, inversion-related threefolds, glide mirrors), each triple
# with its own translation, replicated over the three rhombohedral centrings.
transformations = []
_rot_triples_167 = [
    [[1,0,0,  0,1,0,  0,0,1],
     [0,-1,0, 1,-1,0, 0,0,1],
     [-1,1,0, -1,0,0, 0,0,1]],
    [[1,-1,0, 0,-1,0, 0,0,-1],
     [-1,0,0, -1,1,0, 0,0,-1],
     [0,1,0,  1,0,0,  0,0,-1]],
    [[-1,0,0, 0,-1,0, 0,0,-1],
     [0,1,0,  -1,1,0, 0,0,-1],
     [1,-1,0, 1,0,0,  0,0,-1]],
    [[-1,1,0, 0,1,0,  0,0,1],
     [1,0,0,  1,-1,0, 0,0,1],
     [0,-1,0, -1,0,0, 0,0,1]],
]
# One translation (numerator, denominator) per rotation triple, one row per
# centring.
_trans_rows_167 = [
    [([0,0,0], [1,1,1]), ([0,0,1], [1,1,2]),
     ([0,0,0], [1,1,1]), ([0,0,-1], [1,1,2])],
    [([1,2,2], [3,3,3]), ([1,2,7], [3,3,6]),
     ([1,2,2], [3,3,3]), ([1,2,1], [3,3,6])],
    [([2,1,1], [3,3,3]), ([2,1,5], [3,3,6]),
     ([2,1,1], [3,3,3]), ([2,1,-1], [3,3,6])],
]
for row in _trans_rows_167:
    for triple, (num, den) in zip(_rot_triples_167, row):
        for elems in triple:
            rot = N.array(elems)
            rot.shape = (3, 3)
            transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168 (P 6): six pure rotations (no translation component).
# Each symmetry operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169 (P 61): 6_1 screw axis — rotations paired with
# fractional translations along c.  Each operation is stored as
# (3x3 rotation matrix, translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
# Space group 170 (P 65): 6_5 screw axis (enantiomorph of P 61).
# Each operation is stored as (3x3 rotation matrix, translation
# numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
# Space group 171 (P 62): 6_2 screw axis.  Each operation is stored as
# (3x3 rotation matrix, translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
# Space group 172 (P 64): 6_4 screw axis (enantiomorph of P 62).
# Each operation is stored as (3x3 rotation matrix, translation
# numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
# Space group 173 (P 63): 6_3 screw axis.  Each operation is stored as
# (3x3 rotation matrix, translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
# Space group 174 (P -6): six point operations, all without translation.
# Each operation is stored as (3x3 rotation matrix, translation
# numerators, translation denominators).
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
# Space group 175 (P 6/m): twelve point operations, all without
# translation.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
# Space group 176 (P 63/m): twelve operations; some carry a c/2
# translation (including negative numerators, as generated).  Each
# operation is stored as (3x3 rotation matrix, translation numerators,
# translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
# Space group 177 (P 6 2 2): twelve point operations, all without
# translation.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
# Space group 178 (P 61 2 2): twelve operations combining the 6_1 screw
# with twofold axes.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,5], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,6]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
# Space group 179 (P 65 2 2): enantiomorph of P 61 2 2.  Each operation
# is stored as (3x3 rotation matrix, translation numerators,
# translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,5], [1,1,6]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
# Space group 180 (P 62 2 2): twelve operations combining the 6_2 screw
# with twofold axes.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,3]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
# Space group 181 (P 64 2 2): enantiomorph of P 62 2 2.  Each operation
# is stored as (3x3 rotation matrix, translation numerators,
# translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,2], [1,1,3]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
# Space group 182 (P 63 2 2): twelve operations combining the 6_3 screw
# with twofold axes.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
# Space group 183 (P 6 m m): twelve point operations, all without
# translation.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
# Space group 184 (P 6 c c): the six rotations are translation-free;
# the six mirror-derived operations carry a c/2 glide component.  Each
# operation is stored as (3x3 rotation matrix, translation numerators,
# translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
# Space group 185 (P 63 c m): twelve operations, several with a c/2
# translation.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
# Space group 186 (P 63 m c): twelve operations, several with a c/2
# translation.  Each operation is stored as (3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
# Space group 187 ('P -6 m 2'): all 12 operations are pure rotations,
# so every translation is the zero vector (numerator [0,0,0] over [1,1,1]).
transformations = []
for rot_flat in [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg           # lookup by number
space_groups['P -6 m 2'] = sg    # lookup by symbol string
# Space group 188 ('P -6 c 2'): each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg           # lookup by number
space_groups['P -6 c 2'] = sg    # lookup by symbol string
# Space group 189 ('P -6 2 m'): all 12 operations are pure rotations,
# so every translation is the zero vector (numerator [0,0,0] over [1,1,1]).
transformations = []
for rot_flat in [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg           # lookup by number
space_groups['P -6 2 m'] = sg    # lookup by symbol string
# Space group 190 ('P -6 2 c'): each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg           # lookup by number
space_groups['P -6 2 c'] = sg    # lookup by symbol string
# Space group 191 ('P 6/m m m'): all 24 operations are pure rotations,
# so every translation is the zero vector (numerator [0,0,0] over [1,1,1]).
transformations = []
for rot_flat in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg           # lookup by number
space_groups['P 6/m m m'] = sg   # lookup by symbol string
# Space group 192 ('P 6/m c c'): each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg           # lookup by number
space_groups['P 6/m c c'] = sg   # lookup by symbol string
# Space group 193 ('P 63/m c m'): each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg           # lookup by number
space_groups['P 63/m c m'] = sg  # lookup by symbol string
# Space group 194 ('P 63/m m c'): each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg           # lookup by number
space_groups['P 63/m m c'] = sg  # lookup by symbol string
# Space group 195 ('P 2 3'): all 12 operations are pure rotations,
# so every translation is the zero vector (numerator [0,0,0] over [1,1,1]).
transformations = []
for rot_flat in [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)  # 9 flat entries -> 3x3 rotation matrix
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg           # lookup by number
space_groups['P 2 3'] = sg       # lookup by symbol string
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
# Space group No. 197, Hermann-Mauguin symbol 'I 2 3'.
# The 12 point-group rotations are each applied twice: once with a zero
# translation and once shifted by the (1/2, 1/2, 1/2) centring vector.
# Every entry is (3x3 rotation matrix, translation numerators,
# translation denominators), matching the rest of this table.
transformations = []
for num, den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for elems in ([1,0,0,0,1,0,0,0,1],
                  [0,0,1,1,0,0,0,1,0],
                  [0,1,0,0,0,1,1,0,0],
                  [0,-1,0,0,0,-1,1,0,0],
                  [0,0,1,-1,0,0,0,-1,0],
                  [0,-1,0,0,0,1,-1,0,0],
                  [0,0,-1,-1,0,0,0,1,0],
                  [0,0,-1,1,0,0,0,-1,0],
                  [0,1,0,0,0,-1,-1,0,0],
                  [1,0,0,0,-1,0,0,0,-1],
                  [-1,0,0,0,1,0,0,0,-1],
                  [-1,0,0,0,-1,0,0,0,1]):
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
del num, den, elems  # drop loop temporaries; namespace matches the old form
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
# Space group No. 198, Hermann-Mauguin symbol 'P 21 3'.
# Each row below is (flattened 3x3 rotation matrix, translation
# numerators, translation denominators); the arrays are rebuilt fresh
# for every operation so no two transformations share storage.
transformations = []
for elems, num, den in (
        ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
        ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
        ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
        ([0,-1,0,0,0,-1,1,0,0],  [1,0,1], [2,1,2]),
        ([0,0,1,-1,0,0,0,-1,0],  [1,1,0], [2,2,1]),
        ([0,-1,0,0,0,1,-1,0,0],  [0,1,1], [1,2,2]),
        ([0,0,-1,-1,0,0,0,1,0],  [1,0,1], [2,1,2]),
        ([0,0,-1,1,0,0,0,-1,0],  [0,1,1], [1,2,2]),
        ([0,1,0,0,0,-1,-1,0,0],  [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,-1],  [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1],  [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2])):
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
del elems, num, den  # drop loop temporaries; namespace matches the old form
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
# Space group No. 199, Hermann-Mauguin symbol 'I 21 3'.
# 24 operations: the first 12 rows carry the primitive translations, the
# second 12 repeat the same rotations with numerators (1,1,1) over the
# centring denominators.  Each row is (flattened 3x3 rotation matrix,
# translation numerators, translation denominators).
transformations = []
for elems, num, den in (
        ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
        ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
        ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
        ([0,-1,0,0,0,-1,1,0,0],  [0,1,0], [1,2,1]),
        ([0,0,1,-1,0,0,0,-1,0],  [0,0,1], [1,1,2]),
        ([0,-1,0,0,0,1,-1,0,0],  [1,0,0], [2,1,1]),
        ([0,0,-1,-1,0,0,0,1,0],  [0,1,0], [1,2,1]),
        ([0,0,-1,1,0,0,0,-1,0],  [1,0,0], [2,1,1]),
        ([0,1,0,0,0,-1,-1,0,0],  [0,0,1], [1,1,2]),
        ([1,0,0,0,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,-1],  [1,0,0], [2,1,1]),
        ([-1,0,0,0,-1,0,0,0,1],  [0,1,0], [1,2,1]),
        ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
        ([0,0,1,1,0,0,0,1,0],    [1,1,1], [2,2,2]),
        ([0,1,0,0,0,1,1,0,0],    [1,1,1], [2,2,2]),
        ([0,-1,0,0,0,-1,1,0,0],  [1,1,1], [2,1,2]),
        ([0,0,1,-1,0,0,0,-1,0],  [1,1,1], [2,2,1]),
        ([0,-1,0,0,0,1,-1,0,0],  [1,1,1], [1,2,2]),
        ([0,0,-1,-1,0,0,0,1,0],  [1,1,1], [2,1,2]),
        ([0,0,-1,1,0,0,0,-1,0],  [1,1,1], [1,2,2]),
        ([0,1,0,0,0,-1,-1,0,0],  [1,1,1], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,-1],  [1,1,1], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1],  [1,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,1,2])):
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
del elems, num, den  # drop loop temporaries; namespace matches the old form
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
# Space group No. 200, Hermann-Mauguin symbol 'P m -3'.
# All 24 operations have a zero translation, so only the flattened 3x3
# rotation matrices vary; fresh numerator/denominator arrays are built
# per operation to keep every tuple independent, as before.
transformations = []
for elems in ([1,0,0,0,1,0,0,0,1],
              [0,0,1,1,0,0,0,1,0],
              [0,1,0,0,0,1,1,0,0],
              [0,-1,0,0,0,-1,1,0,0],
              [0,0,1,-1,0,0,0,-1,0],
              [0,-1,0,0,0,1,-1,0,0],
              [0,0,-1,-1,0,0,0,1,0],
              [0,0,-1,1,0,0,0,-1,0],
              [0,1,0,0,0,-1,-1,0,0],
              [1,0,0,0,-1,0,0,0,-1],
              [-1,0,0,0,1,0,0,0,-1],
              [-1,0,0,0,-1,0,0,0,1],
              [-1,0,0,0,-1,0,0,0,-1],
              [0,0,-1,-1,0,0,0,-1,0],
              [0,-1,0,0,0,-1,-1,0,0],
              [0,1,0,0,0,1,-1,0,0],
              [0,0,-1,1,0,0,0,1,0],
              [0,1,0,0,0,-1,1,0,0],
              [0,0,1,1,0,0,0,-1,0],
              [0,0,1,-1,0,0,0,1,0],
              [0,-1,0,0,0,1,1,0,0],
              [-1,0,0,0,1,0,0,0,1],
              [1,0,0,0,-1,0,0,0,1],
              [1,0,0,0,1,0,0,0,-1]):
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
del elems  # drop loop temporary; namespace matches the old form
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
# Space group No. 201, Hermann-Mauguin symbol 'P n -3 :2' (origin
# choice 2).  Each row is (flattened 3x3 rotation matrix, translation
# numerators, translation denominators); the second half of the table
# deliberately uses negative numerators, exactly as generated.
transformations = []
for elems, num, den in (
        ([1,0,0,0,1,0,0,0,1],     [0,0,0],    [1,1,1]),
        ([0,0,1,1,0,0,0,1,0],     [0,0,0],    [1,1,1]),
        ([0,1,0,0,0,1,1,0,0],     [0,0,0],    [1,1,1]),
        ([0,-1,0,0,0,-1,1,0,0],   [1,1,0],    [2,2,1]),
        ([0,0,1,-1,0,0,0,-1,0],   [0,1,1],    [1,2,2]),
        ([0,-1,0,0,0,1,-1,0,0],   [1,0,1],    [2,1,2]),
        ([0,0,-1,-1,0,0,0,1,0],   [1,1,0],    [2,2,1]),
        ([0,0,-1,1,0,0,0,-1,0],   [1,0,1],    [2,1,2]),
        ([0,1,0,0,0,-1,-1,0,0],   [0,1,1],    [1,2,2]),
        ([1,0,0,0,-1,0,0,0,-1],   [0,1,1],    [1,2,2]),
        ([-1,0,0,0,1,0,0,0,-1],   [1,0,1],    [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,1],   [1,1,0],    [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],    [1,1,1]),
        ([0,0,-1,-1,0,0,0,-1,0],  [0,0,0],    [1,1,1]),
        ([0,-1,0,0,0,-1,-1,0,0],  [0,0,0],    [1,1,1]),
        ([0,1,0,0,0,1,-1,0,0],    [-1,-1,0],  [2,2,1]),
        ([0,0,-1,1,0,0,0,1,0],    [0,-1,-1],  [1,2,2]),
        ([0,1,0,0,0,-1,1,0,0],    [-1,0,-1],  [2,1,2]),
        ([0,0,1,1,0,0,0,-1,0],    [-1,-1,0],  [2,2,1]),
        ([0,0,1,-1,0,0,0,1,0],    [-1,0,-1],  [2,1,2]),
        ([0,-1,0,0,0,1,1,0,0],    [0,-1,-1],  [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],    [0,-1,-1],  [1,2,2]),
        ([1,0,0,0,-1,0,0,0,1],    [-1,0,-1],  [2,1,2]),
        ([1,0,0,0,1,0,0,0,-1],    [-1,-1,0],  [2,2,1])):
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
del elems, num, den  # drop loop temporaries; namespace matches the old form
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
transformations = []
# Space group 203, F d -3 (origin choice 2).  The full set of 96 symmetry
# operations is the 24 representative operations of the point group m-3
# (with their diamond-glide translations of 0 or +-1/4) combined with the
# four face-centring translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0).  Each operation is stored, as everywhere in this module, as
# a tuple (rotation matrix, translation numerators, translation
# denominators).
#
# Representative operations: row-major rotation elements, translation
# numerators, translation denominators.
_base_ops = [
    ((1,0,0, 0,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((0,0,1, 1,0,0, 0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0, 0,0,1, 1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0, 0,0,-1, 1,0,0), (1,1,0), (4,4,1)),
    ((0,0,1, -1,0,0, 0,-1,0), (0,1,1), (1,4,4)),
    ((0,-1,0, 0,0,1, -1,0,0), (1,0,1), (4,1,4)),
    ((0,0,-1, -1,0,0, 0,1,0), (1,1,0), (4,4,1)),
    ((0,0,-1, 1,0,0, 0,-1,0), (1,0,1), (4,1,4)),
    ((0,1,0, 0,0,-1, -1,0,0), (0,1,1), (1,4,4)),
    ((1,0,0, 0,-1,0, 0,0,-1), (0,1,1), (1,4,4)),
    ((-1,0,0, 0,1,0, 0,0,-1), (1,0,1), (4,1,4)),
    ((-1,0,0, 0,-1,0, 0,0,1), (1,1,0), (4,4,1)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((0,0,-1, -1,0,0, 0,-1,0), (0,0,0), (1,1,1)),
    ((0,-1,0, 0,0,-1, -1,0,0), (0,0,0), (1,1,1)),
    ((0,1,0, 0,0,1, -1,0,0), (-1,-1,0), (4,4,1)),
    ((0,0,-1, 1,0,0, 0,1,0), (0,-1,-1), (1,4,4)),
    ((0,1,0, 0,0,-1, 1,0,0), (-1,0,-1), (4,1,4)),
    ((0,0,1, 1,0,0, 0,-1,0), (-1,-1,0), (4,4,1)),
    ((0,0,1, -1,0,0, 0,1,0), (-1,0,-1), (4,1,4)),
    ((0,-1,0, 0,0,1, 1,0,0), (0,-1,-1), (1,4,4)),
    ((-1,0,0, 0,1,0, 0,0,1), (0,-1,-1), (1,4,4)),
    ((1,0,0, 0,-1,0, 0,0,1), (-1,0,-1), (4,1,4)),
    ((1,0,0, 0,1,0, 0,0,-1), (-1,-1,0), (4,4,1)),
]
# Face-centring translations as (numerators, denominators).
for _cnum, _cden in (((0,0,0), (1,1,1)),
                     ((0,1,1), (1,2,2)),
                     ((1,0,1), (2,1,2)),
                     ((1,1,0), (2,2,1))):
    for _elements, _bnum, _bden in _base_ops:
        rot = N.array(_elements)
        rot.shape = (3, 3)
        _num = []
        _den = []
        for _bn, _bd, _cn, _cd in zip(_bnum, _bden, _cnum, _cden):
            if _bd == 1:
                # Base component is zero: keep the centring fraction.
                _num.append(_cn)
                _den.append(_cd)
            elif _cd == 1:
                # Centring component is zero: keep the base fraction.
                _num.append(_bn)
                _den.append(_bd)
            else:
                # +-1/4 + 1/2 = (+-1 + 2)/4, already in lowest terms.
                _num.append(_bn + 2)
                _den.append(4)
        # Fresh arrays per operation, as in the rest of this module.
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
transformations = []
# Space group 204, I m -3.  The 48 symmetry operations are the 24
# operations of the point group m-3, each taken once with a zero
# translation and once with the body-centring translation (1/2,1/2,1/2).
# Each operation is stored, as everywhere in this module, as a tuple
# (rotation matrix, translation numerators, translation denominators).
_rotations = [
    (1,0,0, 0,1,0, 0,0,1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
    (-1,0,0, 0,-1,0, 0,0,-1),
    (0,0,-1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,-1, -1,0,0),
    (0,1,0, 0,0,1, -1,0,0),
    (0,0,-1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,-1, 1,0,0),
    (0,0,1, 1,0,0, 0,-1,0),
    (0,0,1, -1,0,0, 0,1,0),
    (0,-1,0, 0,0,1, 1,0,0),
    (-1,0,0, 0,1,0, 0,0,1),
    (1,0,0, 0,-1,0, 0,0,1),
    (1,0,0, 0,1,0, 0,0,-1),
]
# Centring translations as (numerators, denominators): identity, then
# the body centre (1/2,1/2,1/2).
for _tnum, _tden in (((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))):
    for _elements in _rotations:
        rot = N.array(_elements)
        rot.shape = (3, 3)
        # Fresh arrays per operation, as in the rest of this module.
        trans_num = N.array(_tnum)
        trans_den = N.array(_tden)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
transformations = []
# Space group 205 ('P a -3'): 24 symmetry operations.  Each entry below is
# (rotation, numerator, denominator): the 9 row-major elements of the 3x3
# rotation matrix, then the integer numerators and denominators of the
# fractional translation vector.
_sg_ops = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0],   [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0],   [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,0,1],   [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,0],   [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [0,1,1],   [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,0,1],   [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [0,1,1],   [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,0],   [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0],   [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0],   [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0],   [-1,0,-1], [2,1,2]),
    ([0,0,-1,1,0,0,0,1,0],   [-1,-1,0], [2,2,1]),
    ([0,1,0,0,0,-1,1,0,0],   [0,-1,-1], [1,2,2]),
    ([0,0,1,1,0,0,0,-1,0],   [-1,0,-1], [2,1,2]),
    ([0,0,1,-1,0,0,0,1,0],   [0,-1,-1], [1,2,2]),
    ([0,-1,0,0,0,1,1,0,0],   [-1,-1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,-1], [2,1,2]),
]
for _rot_elems, _num, _den in _sg_ops:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)  # reshape the flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and Hermann-Mauguin symbol.
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
transformations = []
# Space group 206 ('I a -3'): 48 symmetry operations.  Each entry below is
# (rotation, numerator, denominator): the 9 row-major elements of the 3x3
# rotation matrix, then the integer numerators and denominators of the
# fractional translation vector.  The second half of the list repeats the
# first 24 operations shifted by (1/2, 1/2, 1/2), matching the
# body-centred ('I') lattice in the symbol.
_sg_ops = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0],  [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0],  [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [0,1,0],  [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],  [0,0,1],  [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,0,0],  [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0],  [0,1,0],  [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,0,0],  [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0],  [0,0,1],  [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0],  [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0],  [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0],  [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0],   [0,-1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,1,0],   [0,0,-1], [1,1,2]),
    ([0,1,0,0,0,-1,1,0,0],   [-1,0,0], [2,1,1]),
    ([0,0,1,1,0,0,0,-1,0],   [0,-1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,1,0],   [-1,0,0], [2,1,1]),
    ([0,-1,0,0,0,1,1,0,0],   [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],  [2,2,2]),
    ([0,0,1,1,0,0,0,1,0],    [1,1,1],  [2,2,2]),
    ([0,1,0,0,0,1,1,0,0],    [1,1,1],  [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,1,1],  [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,1],  [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,1,1],  [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,1,1],  [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,1,1],  [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,1],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],  [2,2,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [1,1,1],  [2,2,2]),
    ([0,-1,0,0,0,-1,-1,0,0], [1,1,1],  [2,2,2]),
    ([0,1,0,0,0,1,-1,0,0],   [1,0,1],  [2,1,2]),
    ([0,0,-1,1,0,0,0,1,0],   [1,1,0],  [2,2,1]),
    ([0,1,0,0,0,-1,1,0,0],   [0,1,1],  [1,2,2]),
    ([0,0,1,1,0,0,0,-1,0],   [1,0,1],  [2,1,2]),
    ([0,0,1,-1,0,0,0,1,0],   [0,1,1],  [1,2,2]),
    ([0,-1,0,0,0,1,1,0,0],   [1,1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,1,1],  [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,0,1],  [2,1,2]),
]
for _rot_elems, _num, _den in _sg_ops:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)  # reshape the flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and Hermann-Mauguin symbol.
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
# Space group 207 ('P 4 3 2'): 24 symmetry operations, all pure rotations
# (zero translation).  Each list holds the 9 row-major elements of the 3x3
# rotation matrix.
_sg_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
for _rot_elems in _sg_rotations:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)  # reshape the flat 9-vector into a 3x3 matrix in place
    trans_num = N.array([0,0,0])  # fresh zero translation per operation
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and Hermann-Mauguin symbol.
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
transformations = []
# Space group 208 ('P 42 3 2'): 24 symmetry operations.  Each entry pairs
# the 9 row-major elements of the 3x3 rotation matrix with a flag that is
# True when the operation carries the (1/2, 1/2, 1/2) translation and
# False when its translation is zero.
_sg_ops = [
    ([1,0,0,0,1,0,0,0,1],    False),
    ([1,0,0,0,0,-1,0,1,0],   True),
    ([1,0,0,0,0,1,0,-1,0],   True),
    ([0,0,1,0,1,0,-1,0,0],   True),
    ([0,0,-1,0,1,0,1,0,0],   True),
    ([0,-1,0,1,0,0,0,0,1],   True),
    ([0,1,0,-1,0,0,0,0,1],   True),
    ([0,0,1,1,0,0,0,1,0],    False),
    ([0,1,0,0,0,1,1,0,0],    False),
    ([0,-1,0,0,0,-1,1,0,0],  False),
    ([0,0,1,-1,0,0,0,-1,0],  False),
    ([0,-1,0,0,0,1,-1,0,0],  False),
    ([0,0,-1,-1,0,0,0,1,0],  False),
    ([0,0,-1,1,0,0,0,-1,0],  False),
    ([0,1,0,0,0,-1,-1,0,0],  False),
    ([1,0,0,0,-1,0,0,0,-1],  False),
    ([-1,0,0,0,1,0,0,0,-1],  False),
    ([-1,0,0,0,-1,0,0,0,1],  False),
    ([0,1,0,1,0,0,0,0,-1],   True),
    ([0,-1,0,-1,0,0,0,0,-1], True),
    ([0,0,1,0,-1,0,1,0,0],   True),
    ([0,0,-1,0,-1,0,-1,0,0], True),
    ([-1,0,0,0,0,1,0,1,0],   True),
    ([-1,0,0,0,0,-1,0,-1,0], True),
]
for _rot_elems, _half_shift in _sg_ops:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)  # reshape the flat 9-vector into a 3x3 matrix in place
    if _half_shift:
        trans_num = N.array([1,1,1])
        trans_den = N.array([2,2,2])
    else:
        trans_num = N.array([0,0,0])
        trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and Hermann-Mauguin symbol.
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
# Finish the operator list for space group #209 (F 4 3 2).
# Earlier (off-screen) code already appended the first operators; what remains
# is the tail of the (1/2, 0, 1/2) centering copy and the complete
# (1/2, 1/2, 0) centering copy of the 24 point-group rotations.
# Each transformation is a tuple (rot, trans_num, trans_den): a 3x3 integer
# rotation matrix and a fractional translation stored as separate numerator
# and denominator vectors.
_tail_rots = [
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
_full_rots = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
# Append in the original order: remaining (1/2,0,1/2) operators first,
# then all 24 operators of the (1/2,1/2,0) centering copy.
for trans_num, trans_den, _rset in (
        ([1,0,1], [2,1,2], _tail_rots),
        ([1,1,0], [2,2,1], _full_rots)):
    for _r in _rset:
        rot = N.array(_r)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(trans_num), N.array(trans_den)))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
# Space group #210 (F 41 3 2): 24 point-group rotations combined with the
# four face-centering translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0) -- 96 operators in total.  Rotations flagged True carry an
# additional (1/4,1/4,1/4) screw translation on top of the centering shift;
# the pre-added sums are tabulated below so no fraction arithmetic is needed
# and the stored numerator/denominator arrays match the original table.
transformations = []
_ops = [  # (flattened 3x3 rotation matrix, has_quarter_shift)
    ([1,0,0,0,1,0,0,0,1], False),
    ([1,0,0,0,0,-1,0,1,0], True),
    ([1,0,0,0,0,1,0,-1,0], True),
    ([0,0,1,0,1,0,-1,0,0], True),
    ([0,0,-1,0,1,0,1,0,0], True),
    ([0,-1,0,1,0,0,0,0,1], True),
    ([0,1,0,-1,0,0,0,0,1], True),
    ([0,0,1,1,0,0,0,1,0], False),
    ([0,1,0,0,0,1,1,0,0], False),
    ([0,-1,0,0,0,-1,1,0,0], False),
    ([0,0,1,-1,0,0,0,-1,0], False),
    ([0,-1,0,0,0,1,-1,0,0], False),
    ([0,0,-1,-1,0,0,0,1,0], False),
    ([0,0,-1,1,0,0,0,-1,0], False),
    ([0,1,0,0,0,-1,-1,0,0], False),
    ([1,0,0,0,-1,0,0,0,-1], False),
    ([-1,0,0,0,1,0,0,0,-1], False),
    ([-1,0,0,0,-1,0,0,0,1], False),
    ([0,1,0,1,0,0,0,0,-1], True),
    ([0,-1,0,-1,0,0,0,0,-1], True),
    ([0,0,1,0,-1,0,1,0,0], True),
    ([0,0,-1,0,-1,0,-1,0,0], True),
    ([-1,0,0,0,0,1,0,1,0], True),
    ([-1,0,0,0,0,-1,0,-1,0], True),
]
_centerings = [  # ((plain num, den), (num, den with 1/4,1/4,1/4 added))
    (([0,0,0], [1,1,1]), ([1,1,1], [4,4,4])),
    (([0,1,1], [1,2,2]), ([1,3,3], [4,4,4])),
    (([1,0,1], [2,1,2]), ([3,1,3], [4,4,4])),
    (([1,1,0], [2,2,1]), ([3,3,1], [4,4,4])),
]
# Outer loop over centerings, inner over rotations, matching the original
# append order of the generated table.
for _plain, _shifted in _centerings:
    for _r, _screw in _ops:
        trans_num, trans_den = _shifted if _screw else _plain
        rot = N.array(_r)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(trans_num), N.array(trans_den)))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
# Space group #211 (I 4 3 2): the 24 point-group rotations of 432, each
# taken once with no translation and once shifted by the body-centering
# vector (1/2, 1/2, 1/2) -- 48 operators in total.  Every transformation is
# (rot, trans_num, trans_den) with rot a 3x3 integer matrix and the
# translation stored as numerator/denominator vectors.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
# Plain copy first, then the (1/2,1/2,1/2) body-centered copy, preserving
# the original append order.
for trans_num, trans_den in ([0,0,0], [1,1,1]), ([1,1,1], [2,2,2]):
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(trans_num), N.array(trans_den)))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
# Space group #212 (P 43 3 2): 24 operators, each a 3x3 integer rotation
# matrix with its own fractional translation (numerator and denominator
# stored as separate vectors, exactly as in the generated table).
transformations = []
for _r, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [3,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [3,1,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [3,1,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,1], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [3,3,1], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [3,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [4,4,4]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(trans_num), N.array(trans_den)))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
# Space group 213 (P 41 3 2).
# Each symmetry operation is a (rotation matrix, translation numerators,
# translation denominators) triple; the fractional translation is
# trans_num / trans_den componentwise.  The table below lists the 24
# operations in the original order as (rotation, numerators, denominators).
transformations = []
_ops_213 = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,0,-1,0,1,0), (1,1,3), (4,4,4)),
    ((1,0,0,0,0,1,0,-1,0), (3,1,1), (4,4,4)),
    ((0,0,1,0,1,0,-1,0,0), (3,1,1), (4,4,4)),
    ((0,0,-1,0,1,0,1,0,0), (1,3,1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (1,3,1), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,3), (4,4,4)),
    ((0,0,1,1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0), (1,0,1), (2,1,2)),
    ((0,0,1,-1,0,0,0,-1,0), (1,1,0), (2,2,1)),
    ((0,-1,0,0,0,1,-1,0,0), (0,1,1), (1,2,2)),
    ((0,0,-1,-1,0,0,0,1,0), (1,0,1), (2,1,2)),
    ((0,0,-1,1,0,0,0,-1,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,-1,-1,0,0), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (3,1,1), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,3), (4,4,4)),
    ((0,0,1,0,-1,0,1,0,0), (1,1,3), (4,4,4)),
    ((0,0,-1,0,-1,0,-1,0,0), (3,3,3), (4,4,4)),
    ((-1,0,0,0,0,1,0,1,0), (1,3,1), (4,4,4)),
    ((-1,0,0,0,0,-1,0,-1,0), (3,3,3), (4,4,4)),
]
for _rot_elems, _num, _den in _ops_213:
    # Fresh arrays per operation, exactly as the unrolled original did.
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
# Space group 214 (I 41 3 2).
# Each symmetry operation is a (rotation matrix, translation numerators,
# translation denominators) triple; the fractional translation is
# trans_num / trans_den componentwise.  The first 24 entries are the
# primitive operations, the second 24 their body-centered partners,
# listed in the original order.
transformations = []
_ops_214 = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,0,-1,0,1,0), (1,1,3), (4,4,4)),
    ((1,0,0,0,0,1,0,-1,0), (1,3,3), (4,4,4)),
    ((0,0,1,0,1,0,-1,0,0), (1,3,3), (4,4,4)),
    ((0,0,-1,0,1,0,1,0,0), (1,3,1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (1,3,1), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,3), (4,4,4)),
    ((0,0,1,1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0), (0,1,0), (1,2,1)),
    ((0,0,1,-1,0,0,0,-1,0), (0,0,1), (1,1,2)),
    ((0,-1,0,0,0,1,-1,0,0), (1,0,0), (2,1,1)),
    ((0,0,-1,-1,0,0,0,1,0), (0,1,0), (1,2,1)),
    ((0,0,-1,1,0,0,0,-1,0), (1,0,0), (2,1,1)),
    ((0,1,0,0,0,-1,-1,0,0), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,0), (1,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,3,3), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (4,4,4)),
    ((0,0,1,0,-1,0,1,0,0), (1,1,3), (4,4,4)),
    ((0,0,-1,0,-1,0,-1,0,0), (1,1,1), (4,4,4)),
    ((-1,0,0,0,0,1,0,1,0), (1,3,1), (4,4,4)),
    ((-1,0,0,0,0,-1,0,-1,0), (1,1,1), (4,4,4)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,0,-1,0,1,0), (3,3,5), (4,4,4)),
    ((1,0,0,0,0,1,0,-1,0), (3,5,5), (4,4,4)),
    ((0,0,1,0,1,0,-1,0,0), (3,5,5), (4,4,4)),
    ((0,0,-1,0,1,0,1,0,0), (3,5,3), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (3,5,3), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (3,3,5), (4,4,4)),
    ((0,0,1,1,0,0,0,1,0), (1,1,1), (2,2,2)),
    ((0,1,0,0,0,1,1,0,0), (1,1,1), (2,2,2)),
    ((0,-1,0,0,0,-1,1,0,0), (1,1,1), (2,1,2)),
    ((0,0,1,-1,0,0,0,-1,0), (1,1,1), (2,2,1)),
    ((0,-1,0,0,0,1,-1,0,0), (1,1,1), (1,2,2)),
    ((0,0,-1,-1,0,0,0,1,0), (1,1,1), (2,1,2)),
    ((0,0,-1,1,0,0,0,-1,0), (1,1,1), (1,2,2)),
    ((0,1,0,0,0,-1,-1,0,0), (1,1,1), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (3,5,5), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,3), (4,4,4)),
    ((0,0,1,0,-1,0,1,0,0), (3,3,5), (4,4,4)),
    ((0,0,-1,0,-1,0,-1,0,0), (3,3,3), (4,4,4)),
    ((-1,0,0,0,0,1,0,1,0), (3,5,3), (4,4,4)),
    ((-1,0,0,0,0,-1,0,-1,0), (3,3,3), (4,4,4)),
]
for _rot_elems, _num, _den in _ops_214:
    # Fresh arrays per operation, exactly as the unrolled original did.
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
# Space group 215 (P -4 3 m): 24 point-group operations, every one with a
# zero fractional translation (numerators (0,0,0) over denominators
# (1,1,1)).  Rotations are listed row-major in the original order.
transformations = []
_rots_215 = [
    (1,0,0,0,1,0,0,0,1),
    (-1,0,0,0,0,1,0,-1,0),
    (-1,0,0,0,0,-1,0,1,0),
    (0,0,-1,0,-1,0,1,0,0),
    (0,0,1,0,-1,0,-1,0,0),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
    (0,0,-1,0,1,0,-1,0,0),
    (0,0,1,0,1,0,1,0,0),
    (1,0,0,0,0,-1,0,-1,0),
    (1,0,0,0,0,1,0,1,0),
]
for _rot_elems in _rots_215:
    # Fresh arrays per operation, exactly as the unrolled original did.
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
# Space group 216 (F -4 3 m): the 24 point-group operations of -4 3 m
# combined with the four face-centering translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0) — 96 operations in total.  The nested loop
# reproduces the original's ordering: all 24 rotations for one centering
# translation before moving to the next.
transformations = []
_rots_216 = [
    (1,0,0,0,1,0,0,0,1),
    (-1,0,0,0,0,1,0,-1,0),
    (-1,0,0,0,0,-1,0,1,0),
    (0,0,-1,0,-1,0,1,0,0),
    (0,0,1,0,-1,0,-1,0,0),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
    (0,0,-1,0,1,0,-1,0,0),
    (0,0,1,0,1,0,1,0,0),
    (1,0,0,0,0,-1,0,-1,0),
    (1,0,0,0,0,1,0,1,0),
]
# Face-centering translations as (numerators, denominators) pairs.
_centerings_216 = [
    ((0,0,0), (1,1,1)),
    ((0,1,1), (1,2,2)),
    ((1,0,1), (2,1,2)),
    ((1,1,0), (2,2,1)),
]
for _num, _den in _centerings_216:
    for _rot_elems in _rots_216:
        # Fresh arrays per operation, exactly as the unrolled original did.
        rot = N.array(_rot_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
# --- Space group No. 217 (I -4 3 m) ---
# The body-centred group consists of the 24 point-group rotations of
# -4 3 m, each taken once with no translation and once shifted by the
# I-centring vector (1/2, 1/2, 1/2).  Each stored entry is a triple
# (rot, trans_num, trans_den): a 3x3 integer rotation matrix and the
# component-wise fractional translation trans_num / trans_den.
transformations = []
_rotations_217 = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
# First the 24 untranslated operations, then the 24 centring copies, in
# the same rotation order.  Fresh arrays are built per entry so no two
# stored triples share array objects.
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _elems in _rotations_217:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
# Register the finished group under both its IT number and its
# Hermann-Mauguin symbol.
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
# --- Space group No. 218 (P -4 3 n) ---
# 24 symmetry operations, tabulated as (rotation matrix entries,
# translation numerators, translation denominators).  Each stored entry
# is a triple (rot, trans_num, trans_den): a 3x3 integer rotation matrix
# and the component-wise fractional translation trans_num / trans_den.
transformations = []
_ops_218 = [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0],   [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],   [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0],   [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0],   [1,1,1], [2,2,2]),
]
for _elems, _num, _den in _ops_218:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the finished group under both its IT number and its
# Hermann-Mauguin symbol.
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
# --- Space group No. 219 (F -4 3 c) ---
# The face-centred group is generated from 24 representative operations,
# replicated over the four F-lattice centring vectors.  Translations are
# bookkept in units of 1/2 so adding the centring shift is plain integer
# arithmetic; each component is then re-expressed as the reduced
# numerator/denominator pair stored in the table
# (0 half-units -> 0/1, 1 -> 1/2, 2 -> 1/1).
transformations = []
_ops_219 = [
    # (rotation matrix entries, translation in half-units along x, y, z)
    ([1,0,0,0,1,0,0,0,1],   (0, 0, 0)),
    ([-1,0,0,0,0,1,0,-1,0], (1, 0, 0)),
    ([-1,0,0,0,0,-1,0,1,0], (1, 0, 0)),
    ([0,0,-1,0,-1,0,1,0,0], (1, 0, 0)),
    ([0,0,1,0,-1,0,-1,0,0], (1, 0, 0)),
    ([0,1,0,-1,0,0,0,0,-1], (1, 0, 0)),
    ([0,-1,0,1,0,0,0,0,-1], (1, 0, 0)),
    ([0,0,1,1,0,0,0,1,0],   (0, 0, 0)),
    ([0,1,0,0,0,1,1,0,0],   (0, 0, 0)),
    ([0,-1,0,0,0,-1,1,0,0], (0, 0, 0)),
    ([0,0,1,-1,0,0,0,-1,0], (0, 0, 0)),
    ([0,-1,0,0,0,1,-1,0,0], (0, 0, 0)),
    ([0,0,-1,-1,0,0,0,1,0], (0, 0, 0)),
    ([0,0,-1,1,0,0,0,-1,0], (0, 0, 0)),
    ([0,1,0,0,0,-1,-1,0,0], (0, 0, 0)),
    ([1,0,0,0,-1,0,0,0,-1], (0, 0, 0)),
    ([-1,0,0,0,1,0,0,0,-1], (0, 0, 0)),
    ([-1,0,0,0,-1,0,0,0,1], (0, 0, 0)),
    ([0,-1,0,-1,0,0,0,0,1], (1, 0, 0)),
    ([0,1,0,1,0,0,0,0,1],   (1, 0, 0)),
    ([0,0,-1,0,1,0,-1,0,0], (1, 0, 0)),
    ([0,0,1,0,1,0,1,0,0],   (1, 0, 0)),
    ([1,0,0,0,0,-1,0,-1,0], (1, 0, 0)),
    ([1,0,0,0,0,1,0,1,0],   (1, 0, 0)),
]
# F-centring vectors, also in half-units: (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2), (1/2,1/2,0).
_centerings_219 = [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)]
for _cent in _centerings_219:
    for _elems, _shift in _ops_219:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        _num = []
        _den = []
        for _t in (s + c for s, c in zip(_shift, _cent)):
            # _t counts half-translations: 0 -> 0/1, 1 -> 1/2, 2 -> 1/1.
            _num.append(1 if _t == 2 else _t)
            _den.append(2 if _t == 1 else 1)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
# Register the finished group under both its IT number and its
# Hermann-Mauguin symbol.
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
# Operator list for the next space group; presumably No. 220 — the
# SpaceGroup(...) registration for this list lies below this excerpt,
# TODO confirm.  Entries are (rot, trans_num, trans_den) triples: a 3x3
# integer rotation matrix and the component-wise fractional translation
# trans_num / trans_den.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
# From here on the same rotations repeat with (1/2, 1/2, 1/2) added to
# each translation (body-centring copies), e.g. [1,1,3]/[4,4,4] above
# becomes [3,3,5]/[4,4,4] below.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# (operator list continues past this excerpt)
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
# Space group 221 (P m -3 m): the 48 point-group operations of the full
# cubic holohedry m-3m.  Every operation carries a zero translation, so
# only the 3x3 rotation matrices differ; build them in a loop instead of
# 48 copy-pasted stanzas.  Each element of `transformations` is a
# (rotation matrix, translation numerator, translation denominator)
# triple; the translation in fractional coordinates is num / den.
transformations = []
for rot_elems in [
    (1,0,0,0,1,0,0,0,1),
    (1,0,0,0,0,-1,0,1,0),
    (1,0,0,0,0,1,0,-1,0),
    (0,0,1,0,1,0,-1,0,0),
    (0,0,-1,0,1,0,1,0,0),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (0,0,1,0,-1,0,1,0,0),
    (0,0,-1,0,-1,0,-1,0,0),
    (-1,0,0,0,0,1,0,1,0),
    (-1,0,0,0,0,-1,0,-1,0),
    (-1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,0,1,0,-1,0),
    (-1,0,0,0,0,-1,0,1,0),
    (0,0,-1,0,-1,0,1,0,0),
    (0,0,1,0,-1,0,-1,0,0),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (0,0,-1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,-1,-1,0,0),
    (0,1,0,0,0,1,-1,0,0),
    (0,0,-1,1,0,0,0,1,0),
    (0,1,0,0,0,-1,1,0,0),
    (0,0,1,1,0,0,0,-1,0),
    (0,0,1,-1,0,0,0,1,0),
    (0,-1,0,0,0,1,1,0,0),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (1,0,0,0,1,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
    (0,0,-1,0,1,0,-1,0,0),
    (0,0,1,0,1,0,1,0,0),
    (1,0,0,0,0,-1,0,-1,0),
    (1,0,0,0,0,1,0,1,0),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(221, 'P m -3 m', transformations)
# Register under both the International Tables number and the
# Hermann-Mauguin symbol, matching the convention used for every group.
space_groups[221] = sg
space_groups['P m -3 m'] = sg
# Space group 222 (P n -3 n, origin choice 2): 48 symmetry operations,
# this time with non-trivial fractional translations.  Each table row is
# (rotation elements, translation numerator, translation denominator);
# the translation in fractional coordinates is num / den.  Looping over
# the table replaces 48 identical copy-pasted stanzas.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0),    (1,1,1)),
    ((1,0,0,0,0,-1,0,1,0),   (0,1,0),    (1,2,1)),
    ((1,0,0,0,0,1,0,-1,0),   (0,0,1),    (1,1,2)),
    ((0,0,1,0,1,0,-1,0,0),   (0,0,1),    (1,1,2)),
    ((0,0,-1,0,1,0,1,0,0),   (1,0,0),    (2,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,0,0),    (2,1,1)),
    ((0,1,0,-1,0,0,0,0,1),   (0,1,0),    (1,2,1)),
    ((0,0,1,1,0,0,0,1,0),    (0,0,0),    (1,1,1)),
    ((0,1,0,0,0,1,1,0,0),    (0,0,0),    (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0),  (1,1,0),    (2,2,1)),
    ((0,0,1,-1,0,0,0,-1,0),  (0,1,1),    (1,2,2)),
    ((0,-1,0,0,0,1,-1,0,0),  (1,0,1),    (2,1,2)),
    ((0,0,-1,-1,0,0,0,1,0),  (1,1,0),    (2,2,1)),
    ((0,0,-1,1,0,0,0,-1,0),  (1,0,1),    (2,1,2)),
    ((0,1,0,0,0,-1,-1,0,0),  (0,1,1),    (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,1,1),    (1,2,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,0,1),    (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (1,1,0),    (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,1),    (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1),    (2,2,2)),
    ((0,0,1,0,-1,0,1,0,0),   (0,1,0),    (1,2,1)),
    ((0,0,-1,0,-1,0,-1,0,0), (1,1,1),    (2,2,2)),
    ((-1,0,0,0,0,1,0,1,0),   (1,0,0),    (2,1,1)),
    ((-1,0,0,0,0,-1,0,-1,0), (1,1,1),    (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0),    (1,1,1)),
    ((-1,0,0,0,0,1,0,-1,0),  (0,-1,0),   (1,2,1)),
    ((-1,0,0,0,0,-1,0,1,0),  (0,0,-1),   (1,1,2)),
    ((0,0,-1,0,-1,0,1,0,0),  (0,0,-1),   (1,1,2)),
    ((0,0,1,0,-1,0,-1,0,0),  (-1,0,0),   (2,1,1)),
    ((0,1,0,-1,0,0,0,0,-1),  (-1,0,0),   (2,1,1)),
    ((0,-1,0,1,0,0,0,0,-1),  (0,-1,0),   (1,2,1)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,0,0),    (1,1,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,0,0),    (1,1,1)),
    ((0,1,0,0,0,1,-1,0,0),   (-1,-1,0),  (2,2,1)),
    ((0,0,-1,1,0,0,0,1,0),   (0,-1,-1),  (1,2,2)),
    ((0,1,0,0,0,-1,1,0,0),   (-1,0,-1),  (2,1,2)),
    ((0,0,1,1,0,0,0,-1,0),   (-1,-1,0),  (2,2,1)),
    ((0,0,1,-1,0,0,0,1,0),   (-1,0,-1),  (2,1,2)),
    ((0,-1,0,0,0,1,1,0,0),   (0,-1,-1),  (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1),   (0,-1,-1),  (1,2,2)),
    ((1,0,0,0,-1,0,0,0,1),   (-1,0,-1),  (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1),   (-1,-1,0),  (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (0,0,-1),   (1,1,2)),
    ((0,1,0,1,0,0,0,0,1),    (-1,-1,-1), (2,2,2)),
    ((0,0,-1,0,1,0,-1,0,0),  (0,-1,0),   (1,2,1)),
    ((0,0,1,0,1,0,1,0,0),    (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,0,-1,0,-1,0),  (-1,0,0),   (2,1,1)),
    ((1,0,0,0,0,1,0,1,0),    (-1,-1,-1), (2,2,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
# Register under both the International Tables number and the
# Hermann-Mauguin symbol (":2" marks origin choice 2).
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
# Space group 223 (P m -3 n): 48 symmetry operations.  Translations are
# either zero or +-(1/2, 1/2, 1/2), so each table row records only the
# rotation elements plus the translation numerator/denominator pair.
# Each element of `transformations` is a (rotation matrix, translation
# numerator, translation denominator) triple; the fractional translation
# is num / den.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0),    (1,1,1)),
    ((1,0,0,0,0,-1,0,1,0),   (1,1,1),    (2,2,2)),
    ((1,0,0,0,0,1,0,-1,0),   (1,1,1),    (2,2,2)),
    ((0,0,1,0,1,0,-1,0,0),   (1,1,1),    (2,2,2)),
    ((0,0,-1,0,1,0,1,0,0),   (1,1,1),    (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,1),    (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,1),    (2,2,2)),
    ((0,0,1,1,0,0,0,1,0),    (0,0,0),    (1,1,1)),
    ((0,1,0,0,0,1,1,0,0),    (0,0,0),    (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0),  (0,0,0),    (1,1,1)),
    ((0,0,1,-1,0,0,0,-1,0),  (0,0,0),    (1,1,1)),
    ((0,-1,0,0,0,1,-1,0,0),  (0,0,0),    (1,1,1)),
    ((0,0,-1,-1,0,0,0,1,0),  (0,0,0),    (1,1,1)),
    ((0,0,-1,1,0,0,0,-1,0),  (0,0,0),    (1,1,1)),
    ((0,1,0,0,0,-1,-1,0,0),  (0,0,0),    (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,0),    (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1),  (0,0,0),    (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0),    (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),   (1,1,1),    (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1),    (2,2,2)),
    ((0,0,1,0,-1,0,1,0,0),   (1,1,1),    (2,2,2)),
    ((0,0,-1,0,-1,0,-1,0,0), (1,1,1),    (2,2,2)),
    ((-1,0,0,0,0,1,0,1,0),   (1,1,1),    (2,2,2)),
    ((-1,0,0,0,0,-1,0,-1,0), (1,1,1),    (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0),    (1,1,1)),
    ((-1,0,0,0,0,1,0,-1,0),  (-1,-1,-1), (2,2,2)),
    ((-1,0,0,0,0,-1,0,1,0),  (-1,-1,-1), (2,2,2)),
    ((0,0,-1,0,-1,0,1,0,0),  (-1,-1,-1), (2,2,2)),
    ((0,0,1,0,-1,0,-1,0,0),  (-1,-1,-1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1),  (-1,-1,-1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1),  (-1,-1,-1), (2,2,2)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,0,0),    (1,1,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,0,0),    (1,1,1)),
    ((0,1,0,0,0,1,-1,0,0),   (0,0,0),    (1,1,1)),
    ((0,0,-1,1,0,0,0,1,0),   (0,0,0),    (1,1,1)),
    ((0,1,0,0,0,-1,1,0,0),   (0,0,0),    (1,1,1)),
    ((0,0,1,1,0,0,0,-1,0),   (0,0,0),    (1,1,1)),
    ((0,0,1,-1,0,0,0,1,0),   (0,0,0),    (1,1,1)),
    ((0,-1,0,0,0,1,1,0,0),   (0,0,0),    (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,0),    (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1),   (0,0,0),    (1,1,1)),
    ((1,0,0,0,1,0,0,0,-1),   (0,0,0),    (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (-1,-1,-1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1),    (-1,-1,-1), (2,2,2)),
    ((0,0,-1,0,1,0,-1,0,0),  (-1,-1,-1), (2,2,2)),
    ((0,0,1,0,1,0,1,0,0),    (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,0,-1,0,-1,0),  (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,0,1,0,1,0),    (-1,-1,-1), (2,2,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(223, 'P m -3 n', transformations)
# Register under both the International Tables number and the
# Hermann-Mauguin symbol, matching the convention used for every group.
space_groups[223] = sg
space_groups['P m -3 n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# Remaining symmetry operations of space group 224 (P n -3 m, origin
# choice 2).  Each entry is (flat 3x3 rotation matrix, translation
# numerators, translation denominators); the loop reproduces the original
# unrolled statements, appending to the partially built `transformations`
# list in identical order with identical values.
for _rot_elems, _num, _den in [
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,1,0,-1,0,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,1,0], [0,1,1], [1,2,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,0,-1], [2,1,2]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,0,-1,0,-1,0,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,0,1,0,1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,0,1,0,1,0], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the completed group under both its number and its
# Hermann-Mauguin symbol.
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
space_groups[224] = sg
space_groups['P n -3 m :2'] = sg
# Start of the next space group's operation list.  The unrolled generated
# statements repeat the same 48 point-group rotations once per centering
# translation, so they are rewritten here as a rotation table plus loops;
# the tuples appended (values and order) are identical to the original.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
# Three complete centering blocks: (0,0,0), (0,1/2,1/2), (1/2,0,1/2).
for _num, _den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
]:
    for _rot_elems in _rotations:
        rot = N.array(_rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
# Fourth centering block (1/2,1/2,0): only its first 33 operations fall
# inside this span; the remaining ones follow in the unchanged code below.
for _rot_elems in _rotations[:33]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([1,1,0])
    trans_den = N.array([2,2,1])
    transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# Build space group No. 225 and register it in the module-level lookup table
# under both its International Tables number and its Hermann-Mauguin symbol.
sg = SpaceGroup(225, 'F m -3 m', transformations)
for _key in (225, 'F m -3 m'):
    space_groups[_key] = sg
# Start collecting the symmetry operations of the next space group.  This is
# the first of four batches of 48 operations (one batch per face-centring
# vector).  Each entry below is (flat 3x3 rotation matrix, translation
# numerators, translation denominators); the loop body reproduces exactly the
# statements of the expanded generated code, including leaving `rot`,
# `trans_num` and `trans_den` bound to the values of the final operation.
transformations = []
for _elements, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,-1,0], [1,0,0], [2,1,1]),
    ([0,0,1,0,1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,0,1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,0,1,0,-1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,1,0,1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,0,0], [2,1,1]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,0,0], [2,1,1]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,0,0], [2,1,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,0,0], [2,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,0,0], [2,1,1]),
    ([0,0,1,0,1,0,1,0,0], [-1,0,0], [2,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,1,0], [-1,0,0], [2,1,1]),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Second batch of 48 symmetry operations for the current space group: the same
# 48 rotations as the first batch, with translations offset by the centring
# vector (0, 1/2, 1/2).  Each entry is (flat 3x3 rotation, translation
# numerators, translation denominators); the loop body appends to the
# module-level `transformations` list exactly as the expanded code did.
for _elements, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,1,1,0,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,1], [1,2,2]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,1,1], [2,2,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [0,1,1], [1,2,2]),
    ([0,0,1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,0,1,-1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0], [-1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0], [-1,1,1], [2,2,2]),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Third batch of 48 symmetry operations for the current space group: the same
# 48 rotations again, with translations offset by the centring vector
# (1/2, 0, 1/2).  Each entry is (flat 3x3 rotation, translation numerators,
# translation denominators); the loop body appends to the module-level
# `transformations` list exactly as the expanded code did.
for _elements, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,0,-1,0,1,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,0,1,0,-1,0], [1,0,1], [1,1,2]),
    ([0,0,1,0,1,0,-1,0,0], [1,0,1], [1,1,2]),
    ([0,0,-1,0,1,0,1,0,0], [1,0,1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [1,1,2]),
    ([0,0,1,1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,1,1,0,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,0,1], [1,1,2]),
    ([0,0,1,0,-1,0,1,0,0], [1,0,1], [1,1,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,0,1], [1,1,2]),
    ([-1,0,0,0,0,1,0,1,0], [1,0,1], [1,1,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,0,1,0,-1,0], [0,0,1], [1,1,2]),
    ([-1,0,0,0,0,-1,0,1,0], [0,0,1], [1,1,2]),
    ([0,0,-1,0,-1,0,1,0,0], [0,0,1], [1,1,2]),
    ([0,0,1,0,-1,0,-1,0,0], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,-1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,0,-1,0,1,0,-1,0,0], [0,0,1], [1,1,2]),
    ([0,0,1,0,1,0,1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,0,-1,0,-1,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,0,1,0,1,0], [0,0,1], [1,1,2]),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Fourth batch of symmetry operations for the current space group, with
# translations offset by the centring vector (1/2, 1/2, 0).  Only the 34
# operations visible in this part of the generated file are reproduced here;
# the batch continues below.  Each entry is (flat 3x3 rotation, translation
# numerators, translation denominators); the loop body appends to the
# module-level `transformations` list exactly as the expanded code did.
for _elements, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,0], [1,2,1]),
    ([1,0,0,0,0,1,0,-1,0], [1,1,0], [1,2,1]),
    ([0,0,1,0,1,0,-1,0,0], [1,1,0], [1,2,1]),
    ([0,0,-1,0,1,0,1,0,0], [1,1,0], [1,2,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,0], [1,2,1]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,0], [1,2,1]),
    ([0,0,1,1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,1,1,0,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,0], [2,2,1]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [1,2,1]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,0], [1,2,1]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,0], [1,2,1]),
    ([-1,0,0,0,0,1,0,1,0], [1,1,0], [1,2,1]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,0,1,0,-1,0], [0,1,0], [1,2,1]),
    ([-1,0,0,0,0,-1,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,0,-1,0,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,0,-1,0,-1,0,0], [0,1,0], [1,2,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,1,0], [1,2,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,1,0], [1,2,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,1,-1,0,0], [1,1,0], [2,2,1]),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
# Register space group No. 226 (Hermann-Mauguin symbol 'F m -3 c',
# face-centred cubic) built from the operator tuples accumulated above.
# The group is stored under both its International Tables number and its
# symbol so callers can look it up by either key.
sg = SpaceGroup(226, 'F m -3 c', transformations)
space_groups[226] = sg
space_groups['F m -3 c'] = sg
# Begin the symmetry-operation list for the next space group in the table
# (its SpaceGroup(...) registration lies beyond this chunk). Each entry is a
# tuple (rot, trans_num, trans_den): a 3x3 integer rotation matrix plus a
# fractional translation given as element-wise numerator/denominator arrays
# (presumably applied as x' = rot.x + trans_num/trans_den in fractional
# coordinates — NOTE(review): confirm against the SpaceGroup constructor).
transformations = []
# First operator: the identity (rot = I, translation = 0/1).
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# Register space group 227 (F d -3 m, origin choice 2) under both its
# International Tables number and its Hermann-Mauguin symbol string.
sg = SpaceGroup(227, 'F d -3 m :2', transformations)
space_groups[227] = sg
space_groups['F d -3 m :2'] = sg
# Begin the symmetry-operation list for the next space group. Each entry is
# (rotation matrix, translation numerator, translation denominator) so the
# fractional translation stays an exact rational: trans_num / trans_den.
transformations = []
# Identity operation: 3x3 identity rotation, zero translation (0/1, 0/1, 0/1).
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = | N.array([1,0,0,0,1,0,0,0,-1]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""""
This module contains tools for backend modeling and scoring
EDITED FOR UEF SUMMERSCHOOL
"""
__version__ = '1.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>'
import numpy as np
from scipy.linalg import eigh, cholesky, inv, svd, solve
import time
class GPLDA:
    """Gaussian Probabilistic LDA backend for i-vector scoring.

    Model: an i-vector w = Phi*y + eps, with speaker factor y ~ N(0, I)
    and residual eps ~ N(0, Sigma).  Trained either in closed form
    (train_ml) or by EM (train_em); verification scores come from
    score_trials.  Data matrices are (tv_dim, n_samples), one i-vector
    per column.
    """
    def __init__(self,tv_dim, nphi, niter):
        # tv_dim: i-vector dimensionality; nphi: number of speaker factors;
        # niter: EM iterations for train_em.
        self.tv_dim = tv_dim
        self.nphi = nphi
        self.niter = niter
        # Residual covariance, initialized isotropic.
        self.Sigma = 1./self.tv_dim * np.eye(self.tv_dim)
        # Eigenvoice subspace, initialized to the first nphi axes.
        self.Phi = np.r_[np.eye(self.nphi), np.zeros((self.tv_dim-self.nphi, self.nphi))]
        self.Sb = np.zeros((self.tv_dim, self.tv_dim))
        self.St = np.zeros((self.tv_dim, self.tv_dim))
    def train_ml(self, data, spk_labs):
        """Closed-form training: between-class cov = total cov - within-class cov."""
        classes, labels = unique(spk_labs, return_ind = True)
        nclasses = classes.size
        Sw = compute_within_cov(data, labels, nclasses)
        self.St = np.cov(data)
        self.Sb = self.St - Sw
    def train_em(self, data, spk_labs):
        """EM training of Phi and Sigma; data columns get grouped per speaker."""
        # make sure the labels are sorted
        spk_labs = unique(spk_labs, return_ind = True)[1]
        spk_labs, I = np.sort(spk_labs), np.argsort(spk_labs)
        data = data[:, I]
        spk_counts = np.bincount(spk_labs) # sessions per speaker
        print('\n\nRandomly initializing the PLDA hyperparameters ...\n\n')
        # Sigma = np.cov(data.T)
        # Phi = np.random.randn((self.tv_dim, nphi))
        nspks = spk_counts.size
        # F accumulates the first-order statistics (sum of i-vectors) per speaker.
        F = np.zeros((self.tv_dim, nspks))
        cnt = 0
        for spk in range(nspks):
            # Speaker indices
            idx = np.arange(spk_counts[spk]) + cnt
            F[:, spk] = data[:, idx].sum(1)
            cnt += spk_counts[spk]
        data_cov = data.dot(data.T)
        print('Re-estimating the Eigenvoice subspace with {} factors ...\n'.format(self.nphi))
        for iter in range(self.niter):
            print('EM iter#: {} \t'.format(iter+1), end=" ")
            tic = time.time()
            # expectation
            Ey, Eyy = self.expectation_plda(data, F, spk_counts);
            # maximization
            self.maximization_plda(data, data_cov, F, Ey, Eyy)
            llk = self.comp_llk(data)
            toc = time.time() - tic
            print('[llk = {0:.2f}] \t [elaps = {1:.2f} s]'.format(llk, toc))
        # Final between/total covariances implied by the learned model.
        self.Sb = self.Phi.dot(self.Phi.T)
        self.St = self.Sb + self.Sigma
    def expectation_plda(self, data, F, spk_counts):
        """E-step: posterior mean (per speaker) and aggregated second moment of y."""
        # computes the posterior mean and covariance of the factors
        nsamples = data.shape[1]
        nspks = spk_counts.size
        Eyy = np.zeros((self.nphi,self.nphi))
        Ey_spk = np.zeros((self.nphi, nspks))
        # initialize common terms to save computations: the posterior precision
        # only depends on a speaker's session count, so cache one inverse per
        # distinct count.
        uniqFreqs = unique(spk_counts)
        nuniq = uniqFreqs.size
        invTerms = np.empty((nuniq,self.nphi,self.nphi))
        PhiT_invS = solve(self.Sigma.T, self.Phi).T
        PhiT_invS_Phi = PhiT_invS.dot(self.Phi)
        I = np.eye(self.nphi)
        for ix in range(nuniq):
            nPhiT_invS_Phi = uniqFreqs[ix] * PhiT_invS_Phi
            invTerms[ix] = inv(I + nPhiT_invS_Phi)
        for spk in range(nspks):
            nsessions = spk_counts[spk]
            PhiT_invS_y = PhiT_invS.dot(F[:, spk])
            idx = np.flatnonzero(uniqFreqs == nsessions)[0]
            Cyy = invTerms[idx]
            Ey_spk[:, spk] = Cyy.dot(PhiT_invS_y)
            Eyy += nsessions * Cyy
        Eyy += (Ey_spk * spk_counts.T).dot(Ey_spk.T)
        return Ey_spk, Eyy
    def comp_llk(self, data):
        """Marginal log-likelihood of data under the current (Phi, Sigma)."""
        nsamples = data.shape[1]
        S = self.Phi.dot(self.Phi.T) + self.Sigma
        llk = -0.5 * (self.tv_dim * nsamples * np.log(2*np.pi) \
                      + nsamples * logdet(S) + np.sum(data*solve(S,data)))
        return llk
    def maximization_plda(self, data, data_cov, F, Ey, Eyy):
        """M-step: update Phi and Sigma given the posterior statistics."""
        # ML re-estimation of the Eignevoice subspace and the covariance of the
        # residual noise (full).
        nsamples = data.shape[1]
        Ey_FT = Ey.dot(F.T)
        self.Phi = solve(Eyy.T,Ey_FT).T
        self.Sigma = 1./nsamples * (data_cov - self.Phi.dot(Ey_FT))
    def score_trials(self, model_iv, test_iv):
        """Batch PLDA log-likelihood-ratio scores.

        model_iv / test_iv are (tv_dim, n) column i-vector matrices; the
        result is an (n_model, n_test) score matrix.  Works in the reduced
        subspace spanned by the top-nphi left singular vectors of P.
        """
        nphi = self.Phi.shape[0]
        iSt = inv(self.St)
        iS = inv(self.St-self.Sb.dot(iSt).dot(self.Sb))
        Q = iSt-iS
        P = iSt.dot(self.Sb).dot(iS)
        U, s, V = svd(P, full_matrices=False)
        Lambda = np.diag(s[:nphi])
        Uk = U[:,:nphi]
        Q_hat = Uk.T.dot(Q).dot(Uk)
        model_iv = Uk.T.dot(model_iv)
        test_iv = Uk.T.dot(test_iv)
        # Per-side quadratic terms plus the cross term give the final scores.
        score_h1 = np.sum(model_iv.T.dot(Q_hat) * model_iv.T, 1, keepdims=True)
        score_h2 = np.sum(test_iv.T.dot(Q_hat) * test_iv.T, 1, keepdims=True)
        score_h1h2 = 2 * model_iv.T.dot(Lambda).dot(test_iv)
        scores = score_h1h2 + score_h1 + score_h2.T
        return scores
def unique(arr, return_ind=False):
    """Return the unique elements of *arr* in order of first appearance.

    When *return_ind* is True, additionally return an int array mapping
    every element of *arr* to the index of its unique value.
    """
    if not return_ind:
        _, first_pos = np.unique(arr, return_index=True)
        return arr[np.sort(first_pos)]
    seen = {}
    uniq_vals = np.empty(arr.size, dtype=arr.dtype)
    mapping = np.empty(arr.size, dtype='i')
    n_uniq = 0
    for pos, val in enumerate(arr):
        if val not in seen:
            seen[val] = n_uniq
            uniq_vals[n_uniq] = val
            n_uniq += 1
        mapping[pos] = seen[val]
    return uniq_vals[:n_uniq], mapping
def unit_len_norm(data):
    """Scale each column of *data* to unit Euclidean length.

    All-zero columns are left unchanged (their norm is replaced by 1 to
    avoid division by zero).
    """
    lengths = np.sqrt((data * data).sum(axis=0))
    lengths[lengths == 0] = 1.
    return data / lengths
def calc_white_mat(Sw):
    """Return the whitening transform for the covariance matrix *Sw*.

    Computed as the lower Cholesky factor of inv(Sw).
    """
    return cholesky(inv(Sw), lower=True)
def logdet(A):
    """Log-determinant of a symmetric positive-definite matrix via Cholesky."""
    chol_diag = np.diag(cholesky(A))
    return 2 * np.log(chol_diag).sum()
def wccn(data, labels):
    """Within-class covariance normalization transform for *data*.

    Regularizes the within-class covariance slightly before whitening so
    the inverse in calc_white_mat stays well-conditioned.
    """
    n_cls = np.unique(labels).size
    within = compute_within_cov(data, labels, n_cls)
    within = within + 1e-6 * np.eye(within.shape[0])
    return calc_white_mat(within)
def compute_class_avg(data, labels, nclasses):
    """Per-class mean of the columns of *data*.

    Args:
        data: (ndim, nobs) feature matrix, one observation per column.
        labels: numeric class label per observation (values in [0, nclasses)).
        nclasses: total number of classes; classes absent from *labels*
            keep an all-zero mean row, as before.

    Returns:
        (nclasses, ndim) array of class means.
    """
    ndim = data.shape[0]
    mu_c = np.zeros((nclasses, ndim))
    # Iterate each distinct label once; the previous version looped over
    # every observation and recomputed the same class mean once per
    # duplicate label.
    for c in np.unique(labels):  # numeric labels are assumed
        idx = np.flatnonzero(labels == c)
        mu_c[c] = data[:, idx].mean(1)
    return mu_c
def compute_within_cov(data, labels, nclasses, adapt=False):
    """Within-class covariance of *data* (numeric labels assumed).

    Each column of *data* is centered by its class mean before the
    covariance is taken.  *adapt* is accepted for interface compatibility
    and currently unused.
    """
    class_means = compute_class_avg(data, labels, nclasses)
    centered = data - class_means[labels].T
    return np.cov(centered)
def lda(data, labels, adapt=False):
ndim, nobs = data.shape
if nobs != len(labels):
raise ValueError("oh dear! number of data samples ({}) should match the label size ({})!".format(nobs, len(labels)))
M = data.mean(1, keepdims=True) # centering the data
data = data - M
classes, labels = unique(labels, return_ind=True) # make sure labels are numerical
nclasses = classes.size
Sw = compute_within_cov(data, labels, nclasses)
St = | np.cov(data) | numpy.cov |
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems and the Max Planck Institute for Biological
# Cybernetics. All rights reserved.
#
# Contact: <EMAIL>
# ../blender-2.79-linux-glibc219-x86_64/blender -b -P generate_multiHumanPose.py -- 10 0 3
import sys
print(sys.version)
import bpy
from bpy_extras.object_utils import world_to_camera_view as world2cam
import numpy as np
from mathutils import Matrix, Vector, Quaternion, Euler
import math
from os.path import join, dirname, realpath, exists, basename
from os import listdir, remove
from glob import glob
from random import choice
from os import remove
import cv2
from scipy.io import loadmat, savemat
import json
import importlib.util
from itertools import combinations
from scipy.special import binom
from scipy import array as array
import subprocess
import pickle
import bounding_box_collision as collisionDetector
import ray_back_projection_visibleArea as ray_back_projection
# Todo: please add your own working directory and the path to your virtualenv here
# both paths should not end with a "/"
working_dir = '' #absolute path to datageneration
collisionVirtualenv = '' #path to root of virtual environment. rest of path (/bin/activate) will be added later in the script
###################################
#Todo: set output directory
PATH_out = '' # output folder
PATH_tmp = '' # tmp-output folder
mocapDataName = 'mocapAll'
dataset_path_prefix = 'testHands'
###################################
import logging
logger = logging.getLogger('train')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
###################################
###################################
###################################
DBG_MODE_ENFORCE_FLAT_HAND = False
###################################
###################################
###################################
DBG_MODE_ENFORCE_POSE_ZERO = False
###################################
###################################
###################################
UPPER_HEAD = True
# what images to save
save_dbug_Imgs = True
save_flow = True
####################################################
#################################################### Camera
res = [640, 640]
cam_lens = 60
cam_sensor = 32
####################################################
#################################################### background
# lsun_base_path = './resources/sun397_'
lsun_base_path = './resources/' #Todo: replace with path to folder containing SUN397 folder
FLAG_flip_BG = True
#####################################################
##################################################### POSITION
RANDOM_POSITION_perCHUNCK = True
#####################################################
##################################################### BACKGROUND
RANDOM_BG = True
RANDOM_BG_per_CHUNK = True
#####################################################
##################################################### TEXTURE
RANDOM_TXT = True
RANDOM_TXT_per_CHUNK = True
#####################################################
##################################################### SHAPE
RANDOM_SHAPE = True
RANDOM_SHAPE_per_CHUNK = RANDOM_SHAPE
RANDOM_SHAPE_mode = 'randomShape'
#####################################################
##################################################### LIGHTS
RANDOM_LIGHTS = True
RANDOM_LIGHTS_per_CHUNK = True
#####################################################
##################################################### HAND POSE
RANDOM_HAND_POSE = True
RANDOM_Pxl_BLUR_SIZE = True
RANDOM_CAMERA_JITTER = False
#####################################################
#####################################################
RANDOM_CAM_matrix_world = True
USE_MOTION_BLUR = True
#####################################################
##################################################### CAMERA ROT - Z
# TODO: leads to movement of people outside of scene, since they are rotated around their
# initial position
RANDOM_ROT_Z = False
RANDOM_BG_per_FRAME = not RANDOM_BG_per_CHUNK
RANDOM_TXT_per_FRAME = not RANDOM_TXT_per_CHUNK
RANDOM_LIGHTS_per_FRAME = not RANDOM_LIGHTS_per_CHUNK
n_bones = 52
total_pose_DOFs = 78
#####################################################
#####################################################
#####################################################
DBG_FLAG_writeOnDisk = False
DBG_print_MOCAP_keys = False
bg_plane_dist = 10.0
bg_plane_dist_TOLERRANCE = 0.50
bg_plane_dist_INF = 1e+10 # depth larger than cut of depth are set to this value (not used in this
stepsizeFactor = 2
FILE_segm_per_v = './resources/segm_per_v_overlap_SMPLH.pkl'
FILE_sh_original = './resources/sh.osl'
PATHs_texture_participants_GENDER_SPECIFIC = True
PATHs_texture_participants = './smpl_data' \
'/fixed_textures' \
'/<GENDER' \
'>/<CEASAR>_<GENDER>*.jpg'
PATH_MoCap_SMPLH = './smpl_data/handPoses/per_SEQ___bodyHands/'
fingertipVerticesPATH = './resources/fingertips_SMPLH.json'
data_folder = './smpl_data'
FILE_smpl_data_MOCAP = join(data_folder, 'smpl_data_20170225.npz')
data_folder_other = './resources'
FILE_smpl_data_OTHER = join(data_folder_other,
'smpl_data_ONLY_SHAPES_REGRESSORS.npz')
DBG_motion_data_filter = False
DBG_FBX_bone_names = False
DBG_segmentation = False
#####################################################
stepsize_hands = 10
frames_per_shape = 1
#####################################################
shape_totalCoeffs = 10
shape_ndofs = 10
#####################################################
#####################################################
#####################################################
####################################################
####################################################
vblur_factor_Miu = 0.03
vblur_factor_Std = 0.03
vblur_factor = 0.
def project_joints3d_imwrite(joint_3d, img, intrinsic=None, extrinsic=None, imgPATH=None,
                             cam_ob=None, scene=None, save_dbug_Imgs=True):
    """Draw projected 3-D joints onto *img* and optionally save it.

    joint_3d maps a key (presumably one entry per person — confirm against
    callers) to an (n_joints, 3) array.  When both *intrinsic* and
    *extrinsic* are given the projection uses the pinhole matrices;
    otherwise it falls back to Blender's world_to_camera_view with
    *scene*/*cam_ob*.  *img* is modified in place.
    """
    for key in list(joint_3d.keys()):
        for ii in range(joint_3d[key].shape[0]):
            j3d = joint_3d[key][ii, :]
            # Homogeneous coordinates (4x1) for the matrix projection path.
            j3d_4x1 = np.vstack([j3d.reshape((3, 1)), np.array([1])])
            if not (intrinsic is None or extrinsic is None):
                joint_2d = np.dot(np.dot(intrinsic, extrinsic), j3d_4x1)
                # Perspective divide, rounded to pixel coordinates.
                joint_2d = np.array([int(np.round(joint_2d[0] / joint_2d[2])),
                                     int(np.round(joint_2d[1] / joint_2d[2]))])
                cv2.circle(img, tuple((joint_2d[0], joint_2d[1])), 2, (0, 255, 255), 1)
            else:
                render_scale = scene.render.resolution_percentage / 100
                render_size = (int(scene.render.resolution_x * render_scale),
                               int(scene.render.resolution_y * render_scale))
                # world2cam returns normalized coordinates; scale to pixels
                # and flip y to image convention.
                joint_2d = world2cam(scene, cam_ob, Vector((j3d_4x1[:])))
                joint_2d.x = joint_2d.x * render_size[0]
                joint_2d.y = -joint_2d.y * render_size[1] + render_size[1] - 1
                cv2.circle(img, tuple((int(np.round(joint_2d.x)), int(np.round(joint_2d.y)))), 2,
                           (0, 255, 255), 1)
    if save_dbug_Imgs:
        cv2.imwrite(imgPATH, img)
        print(imgPATH)
def cam_compute_intrinsic():
    """Build the 3x3 pinhole intrinsic matrix from the module camera config.

    Uses the module-level render resolution ``res``, focal length
    ``cam_lens`` (mm) and sensor width ``cam_sensor`` (mm); the principal
    point sits at the image center.
    """
    width_px = res[0]
    height_px = res[1]
    focal_mm = cam_lens
    sensor_w_mm = cam_sensor
    sensor_h_mm = sensor_w_mm * height_px / width_px
    scale = 1
    skew = 0
    pixel_aspect_ratio = 1
    # Focal length expressed in pixels, per axis.
    fx = focal_mm * width_px * scale / sensor_w_mm
    fy = focal_mm * height_px * scale * pixel_aspect_ratio / sensor_h_mm
    # Principal point (image center).
    cx = np.round(width_px * scale / 2)
    cy = np.round(height_px * scale / 2)
    return np.array([[fx, skew, cx], [0, fy, cy], [0, 0, 1]])
def load_hand_poses(PATH_hand_poses):
    """Load per-sequence hand pose lists from the pickle files in a folder.

    Keeps only the last ``ncomps`` coefficients of every frame (the hand
    components) and drops 20 frames at both ends of each sequence.
    """
    print('\n\n\n')
    from glob import glob
    from os.path import join
    import pickle as pk
    pkl_files = sorted(glob(join(PATH_hand_poses, '*.pkl')))
    print()
    hand_poses = []
    print('load_hand_poses')
    for pkl_path in pkl_files:
        print(pkl_path)
        with open(pkl_path, "rb") as fin:
            frames = pk.load(fin, encoding='latin1')
        # keep hands only !!!
        frames = [frame[-ncomps:] for frame in frames]
        trim = 20  # ignore unstable frames at sequence boundaries
        hand_poses.append(frames[trim:-trim])
    return hand_poses
def set_background(FLAG_RANDOM, bg_img, idx):
    """Swap the compositor background for a new (possibly random) image.

    Releases the current *bg_img* datablock, picks a path from the global
    ``bg_paths`` (random when *FLAG_RANDOM*), optionally mirrors it when
    ``FLAG_flip_BG`` is set, wires it into the compositor 'Image' node and
    returns (new image datablock, chosen source path).

    Fixes two defects of the previous version: the flipped temporary image
    was immediately overwritten by re-loading the original file, and
    ``makedirs`` was called without ever being imported.
    """
    import os  # os.makedirs is not among the module-level `from os import` names
    # Release the previously loaded background datablock.
    bg_img.user_clear()
    bpy.data.images.remove(bg_img)
    if FLAG_RANDOM:
        PATH_bg = choice(bg_paths)
    else:
        PATH_bg = bg_paths[0]
    if FLAG_flip_BG:
        # Mirror the background horizontally and load the flipped copy.
        flipped = cv2.flip(cv2.imread(PATH_bg), 1)
        PATH_bg_TMP = join(PATH_tmp, join(str(idx), 'bg_TMP.jpg'))
        if not exists(dirname(PATH_bg_TMP)):
            os.makedirs(dirname(PATH_bg_TMP))
        cv2.imwrite(PATH_bg_TMP, flipped)
        bg_img = bpy.data.images.load(PATH_bg_TMP)
    else:
        bg_img = bpy.data.images.load(PATH_bg)
    scene.node_tree.nodes['Image'].image = bg_img
    return bg_img, PATH_bg
##############################################################################
##############################################################################
def set_txt(FLAG_RANDOM, cloth_img, txt_paths, i):
    """Replace the current clothing texture image.

    Releases *cloth_img*, loads a path from *txt_paths* (random when
    *FLAG_RANDOM*, else the first) and returns (new image, chosen path).
    """
    # Drop the previously loaded texture datablock.
    cloth_img.user_clear()
    bpy.data.images.remove(cloth_img)
    txt_PATH = choice(txt_paths) if FLAG_RANDOM else txt_paths[0]
    print(txt_PATH)
    return bpy.data.images.load(txt_PATH), txt_PATH
##############################################################################
##############################################################################
def set_shape(FLAG_RANDOM, RANDOM_mode, i, random_shapes_gendered, gender, fshapes_gendered):
    """Pick a SMPL shape coefficient vector.

    Random mode either samples from a per-gender shape distribution
    ('randomShapeFromDist') or draws uniformly from the gendered shape
    pool ('randomShape').
    """
    if FLAG_RANDOM:
        if RANDOM_mode == 'randomShapeFromDist':
            shape = random_shapes_gendered[gender][i](3.)  # RANDOM SHAPE from distribution
        elif RANDOM_mode == 'randomShape':
            shape = choice(fshapes_gendered[gender])  # RANDOM SHAPE
        else:
            print('\n\n\n', 'not defined - set_sh ape - RANDOM_mode', '\n\n\n')
    else:
        # NOTE(review): this reads the module-level `fshapes`, not the
        # `fshapes_gendered` parameter — confirm that is intended.
        shape = fshapes[0]
    return shape
##############################################################################
##############################################################################
def set_lights(RANDOM_LIGHTS):
    """Return 9 spherical-harmonics lighting coefficients.

    Random mode draws each coefficient uniformly in (-0.7, 0.7), then
    overrides the ambient term (index 0) to stay positive and the second
    term to stay non-positive; otherwise a fixed deterministic set is
    returned.
    """
    if RANDOM_LIGHTS:
        coeffs = .7 * (2 * np.random.rand(9) - 1)
        coeffs[0] = .5 + .9 * np.random.rand()  # first coeff is ambient
        coeffs[1] = -.7 * np.random.rand()
        return coeffs
    coeffs = .7 * (2 * np.ones(9) * 1.0 - 1)
    coeffs[0] = .4 + .7 * 0.5  # first coeff is ambient
    coeffs[1] = -.7 * 0.5
    return coeffs
def get_x_range(cam_ob, plane_ob):
    """Compute the x interval of the ground area visible to the camera.

    Rotates the camera-to-plane vector down by the camera's vertical field
    of view and intersects it with the ground to find the far visible x.
    """
    cam_distance = plane_ob.location - cam_ob.location
    lower_visibility_line = z_rotation(cam_distance, -cam_ob.data.angle_y)
    # Scale factor to intersect the rotated ray with y == 0 (the ground).
    s = cam_ob.location[1] / lower_visibility_line[1]
    x_upper_bound = cam_ob.location[0] - lower_visibility_line[0] * s
    # Hard-coded fallback for cameras far to the left — TODO confirm origin
    # of these magic numbers.
    if cam_ob.location[0] < -2:
        x_upper_bound = 4.5
    x_range = [plane_ob.location[0], x_upper_bound]
    return x_range
# Check for endianness, based on <NAME>'s optical flow code.
# Using little-endian architecture, these two should be equal.
TAG_FLOAT = 202021.25
TAG_CHAR = 'PIEH'.encode()
#
def flow_write(filename, uv, v=None):
    """Write an optical-flow field to *filename* in Middlebury .flo format.

    If *v* is None, *uv* must hold both channels stacked in depth
    (2 x H x W or H x W x 2); otherwise *uv* is the horizontal component
    and *v* the vertical one.

    Original code by <NAME>, adapted from <NAME>.
    """
    nBands = 2
    if v is None:
        field = np.array(uv)
        assert (field.ndim == 3)
        if field.shape[0] == 2:
            u, v = field[0, :, :], field[1, :, :]
        elif field.shape[2] == 2:
            u, v = field[:, :, 0], field[:, :, 1]
        else:
            print('Wrong format for flow input')
            exit(-1)
    else:
        u = uv
    assert (u.shape == v.shape)
    height, width = u.shape
    with open(filename, 'wb') as f:
        # Header: magic marker, then width and height as int32.
        f.write(TAG_CHAR)
        np.array(width).astype(np.int32).tofile(f)
        np.array(height).astype(np.int32).tofile(f)
        # Interleave u and v column-wise (u0 v0 u1 v1 ...) as float32.
        interleaved = np.zeros((height, width * nBands))
        interleaved[:, np.arange(width) * 2] = u
        interleaved[:, np.arange(width) * 2 + 1] = v
        interleaved.astype(np.float32).tofile(f)
def flow_2_img_fromFlowRawImg(flow):
    """Convert a raw 2-channel flow field into a BGR visualization image.

    Direction maps to hue, clipped/normalized magnitude to saturation.
    """
    horiz = flow[:, :, 0].astype(np.float32)
    vert = flow[:, :, 1].astype(np.float32)
    mag, ang = cv2.cartToPolar(horiz, vert, angleInDegrees=True)
    maxmag = 10
    mag = np.clip(mag, 0, maxmag) / maxmag  # normalize magnitude to [0, 1]
    hsv = np.ones((flow.shape[0], flow.shape[1], 3), dtype=np.float32)
    hsv[:, :, 0] = ang  # hue encodes flow direction
    hsv[:, :, 1] = mag  # saturation encodes flow strength
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def render_2d_pose(PATH_OUT_pose_joints_2d_VIZ, pose_joints_2d, sizYX):
    """Rasterize 2-D joints as white dots on a black image and save it.

    *pose_joints_2d* maps a key to an (n_joints, 2) array of pixel
    coordinates; *sizYX* gives the output (height, width).
    """
    height = int(np.round(sizYX[0]))
    width = int(np.round(sizYX[1]))
    canvas = np.zeros((height, width, 1), np.uint8)
    for person in pose_joints_2d:
        joints = pose_joints_2d[person]
        for jj in range(joints.shape[0]):
            center = tuple(np.round(joints[jj, :]).astype(int))
            cv2.circle(canvas, center, 0, (255, 255, 255))
    cv2.imwrite(PATH_OUT_pose_joints_2d_VIZ, canvas)
def setState0():
    """Deselect every object in the scene and clear the active object."""
    for obj in bpy.data.objects.values():
        obj.select = False
    bpy.context.scene.objects.active = None
# create ONE MATERIAL PER PART
# as defined in a PKL with the segmentation
# this is useful to render the segmentation in a material pass
def create_segmentation(ob, person_nr):
    """Create one material per body part on *ob* for segmentation rendering.

    Reads the per-vertex segmentation from FILE_segm_per_v, builds a vertex
    group and a material (with a distinct pass_index derived from
    *person_nr*) for every part, and assigns each material to its part's
    faces.  Returns (part -> material dict, material-index -> part list).
    """
    print('creating segmentation')
    mat_dict = {}
    vgroups = {}
    #
    print('exists(' + FILE_segm_per_v + ') --> ' + str(exists(FILE_segm_per_v)))
    with open(FILE_segm_per_v, 'rb') as f:
        # `load` is presumably pickle.load from a module-level import —
        # TODO confirm (the import is outside this view).
        vsegm = load(f)
    bpy.ops.object.material_slot_remove()
    parts = sorted(vsegm.keys())
    materialID_2_part = ['' for ii in range(len(part2num))]
    #
    for ipart, part in enumerate(parts):
        vs = vsegm[part]
        vgroups[part] = ob.vertex_groups.new(part)
        vgroups[part].add(vs, 1.0, 'ADD')
        bpy.ops.object.vertex_group_set_active(group=part)
        # Copy the base material so each part gets its own pass index.
        mat_dict[part] = bpy.data.materials['Material'].copy()
        mat_dict[part].pass_index = part2num[part]
        bpy.ops.object.material_slot_add()
        ob.material_slots[-1].material = mat_dict[part]
        # Assign the new material to the faces of this part's vertex group.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.vertex_group_select()
        bpy.ops.object.material_slot_assign()
        bpy.ops.object.mode_set(mode='OBJECT')
        materialID_2_part[
            int((mat_dict[part].pass_index) / ((person_nr + 1) * len(parts))) - 1] = part
        if DBG_segmentation:
            print('create_segmentation - (id+1) - %02d // %02d - %03d' % (
                ipart + 1, mat_dict[part].pass_index, mat_dict[part].pass_index * 5), ' -',
                  part)
    if DBG_segmentation:
        exit(1)
    return (mat_dict, materialID_2_part)
# create the different passes that we render
# create the different passes that we render
def create_composite_nodes(tree, name, img=None):
    """Build the compositor graph for all render passes of sequence *name*.

    Wires the render layers through a vector-blur + blur chain into the
    composite/foreground outputs, and attaches file-output nodes for the
    depth, normal, texture, flow, segmentation and object-id passes under
    PATH_tmp/<name>/.  *img* (optional) becomes the background image node's
    content.  Returns the dict of per-pass output base paths.
    """
    ###################################################################
    res_paths = {k: join(PATH_tmp, name, '%s_BLENDER' % (k)) for k in (
        'depth', 'normal', 'txt', 'flow', 'segm',
        'object_Id')}
    ####################################################################
    # Clear default nodes
    for n in tree.nodes:
        tree.nodes.remove(n)
    # Create node for foreground image
    #################################################
    layers = tree.nodes.new('CompositorNodeRLayers')
    #################################################
    # Create node for background image
    bg_im = tree.nodes.new('CompositorNodeImage')
    if img is not None:
        bg_im.image = img
    # Float passes go to EXR; LDR passes to PNG.
    FORMAT_file_FLOAT = 'OPEN_EXR' # OPEN_EXR_MULTILAYER
    FORMAT_file_NON_float = 'PNG'
    NAMING_CONVENTION = '#####'
    # Create node for mixing foreground and background images
    mix = tree.nodes.new('CompositorNodeMixRGB')
    mix.use_alpha = True
    # # Create node for the final output
    ############################################################
    composite_out = tree.nodes.new('CompositorNodeComposite')
    ############################################################
    # Create node for saving depth
    depth_out = tree.nodes.new('CompositorNodeOutputFile')
    depth_out.format.file_format = FORMAT_file_FLOAT
    depth_out.base_path = res_paths['depth']
    depth_out.file_slots[0].path = NAMING_CONVENTION
    # Create node for saving normals
    normals_out = tree.nodes.new('CompositorNodeOutputFile')
    normals_out.format.file_format = FORMAT_file_FLOAT
    normals_out.base_path = res_paths['normal']
    normals_out.file_slots[0].path = NAMING_CONVENTION
    # Create node for saving foreground image
    txt_out = tree.nodes.new('CompositorNodeOutputFile')
    txt_out.format.file_format = FORMAT_file_NON_float
    txt_out.base_path = res_paths['txt']
    txt_out.file_slots[0].path = NAMING_CONVENTION
    # Optical flow output node.
    flow_out = tree.nodes.new('CompositorNodeOutputFile')
    flow_out.format.file_format = FORMAT_file_FLOAT
    flow_out.base_path = res_paths['flow']
    flow_out.file_slots[0].path = NAMING_CONVENTION
    # Material-index (body part segmentation) output node.
    segm_out = tree.nodes.new('CompositorNodeOutputFile')
    segm_out.format.file_format = FORMAT_file_FLOAT
    segm_out.base_path = res_paths['segm']
    segm_out.file_slots[0].path = NAMING_CONVENTION
    # Object-index (per person instance) output node.
    objectid_out = tree.nodes.new('CompositorNodeOutputFile')
    objectid_out.format.file_format = FORMAT_file_FLOAT
    objectid_out.base_path = res_paths['object_Id']
    objectid_out.file_slots[0].path = NAMING_CONVENTION
    # Motion blur driven by the speed/vector pass, followed by a plain blur.
    vecblur = tree.nodes.new('CompositorNodeVecBlur')
    vecblur.samples = 64
    vecblur.factor = vblur_factor
    finalblur = tree.nodes.new('CompositorNodeBlur')
    tree.links.new(layers.outputs['Image'], vecblur.inputs[0])
    # Render-layer socket names differ between Blender versions.
    if '2.79' in bpy.app.version_string:
        tree.links.new(layers.outputs['Depth'], vecblur.inputs[1])
        tree.links.new(layers.outputs['Vector'], vecblur.inputs[2])
    else:
        tree.links.new(layers.outputs['Z'], vecblur.inputs[1])
        tree.links.new(layers.outputs['Speed'], vecblur.inputs[2])
    tree.links.new(vecblur.outputs[0], finalblur.inputs[0])
    #
    tree.links.new(finalblur.outputs[0], composite_out.inputs[0])
    tree.links.new(finalblur.outputs[0], txt_out.inputs[0])
    #
    if '2.79' in bpy.app.version_string:
        tree.links.new(layers.outputs['Depth'], depth_out.inputs[0])
        tree.links.new(layers.outputs['Vector'], flow_out.inputs[0])
    else:
        tree.links.new(layers.outputs['Z'], depth_out.inputs[0])
        tree.links.new(layers.outputs['Speed'], flow_out.inputs[0])
    tree.links.new(layers.outputs['Normal'], normals_out.inputs[0])
    tree.links.new(layers.outputs['IndexMA'], segm_out.inputs[0])
    tree.links.new(layers.outputs['IndexOB'], objectid_out.inputs[0])
    return (res_paths)
# Creation of the spherical harmonics material, using an OSL script
def create_sh_material(tree, PATH_sh, img=None):
    """Build the spherical-harmonics shading node graph in material *tree*.

    Wires UV coordinates through a texture image into an external OSL
    script (*PATH_sh*) and an emission node, so shading is controlled by
    the SH coefficients rather than scene lights.  *img* (optional) is
    assigned to the texture node.
    """
    # clear default nodes
    for n in tree.nodes:
        tree.nodes.remove(n)
    uv = tree.nodes.new('ShaderNodeTexCoord')
    uv.location = -800, 400
    uv_im = tree.nodes.new('ShaderNodeTexImage')
    uv_im.location = -400, 400
    if img is not None:
        uv_im.image = img
    rgb = tree.nodes.new('ShaderNodeRGB')
    rgb.location = -400, 200
    # External OSL script implementing the SH lighting model.
    script = tree.nodes.new('ShaderNodeScript')
    script.location = -230, 400
    script.mode = 'EXTERNAL'
    script.filepath = PATH_sh
    script.update()
    # the emission node makes it independent of the scene lighting
    emission = tree.nodes.new('ShaderNodeEmission')
    emission.location = -60, 400
    mat_out = tree.nodes.new('ShaderNodeOutputMaterial')
    mat_out.location = 110, 400
    tree.links.new(uv.outputs[2], uv_im.inputs[0])
    tree.links.new(uv_im.outputs[0], script.inputs[0])
    tree.links.new(script.outputs[0], emission.inputs[0])
    tree.links.new(emission.outputs[0], mat_out.inputs[0])
def y_rotation(vector, theta):
    """Rotate a 3-D *vector* by *theta* radians about the y-axis."""
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, 0, s],
                    [0, 1, 0],
                    [-s, 0, c]])
    return rot.dot(vector)
def z_rotation(vector, theta):
    """Rotate a 3-D *vector* by *theta* radians about the z-axis."""
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s, 0],
                    [s, c, 0],
                    [0, 0, 1]])
    return rot.dot(vector)
def create_textured_plane(cam_ob, scene, res, cam_distance=9, impath=None, camera_pitch=None):
    """Add a background plane scaled to cover the camera's field of view.

    Creates a plane behind the scene, scales it so it fills the visible
    area for *cam_ob*, gives it an emission material textured with *impath*
    (when given), and returns (plane object, texture node, extrema dict,
    lower visibility line).  *cam_distance* and *camera_pitch* are accepted
    but the distance is recomputed locally.
    """
    x_loc = -3
    y_loc = -1
    z_loc = 0
    bpy.ops.mesh.primitive_plane_add(radius=3, location=(x_loc, y_loc, z_loc),
                                     rotation=(1.5708, 1.5708, -1.5708))
    # Ray along the lower edge of the camera frustum, intersected with the
    # plane's x position to find the highest visible y.
    cam_distance = Vector((x_loc, y_loc, z_loc)) - cam_ob.location
    lower_visibility_line = z_rotation(cam_distance, cam_ob.data.angle_y / 2)
    s = (cam_ob.location[0] + 3) / -lower_visibility_line[0]
    y_visibl = cam_ob.location[1] + s * lower_visibility_line[1]
    # select the plane
    bpy.ops.object.select_all(action='DESELECT')
    plane = bpy.data.objects['Plane']
    plane.select = True
    scene.objects.active = plane
    # compute cam_distance to plane center
    x = cam_ob.matrix_world[0][3]
    y = cam_ob.matrix_world[1][3]
    cam_distance = math.sqrt((x_loc - x) ** 2 + (y_loc - y) ** 2)
    # scale plane to cover visible area
    vertices = [plane.matrix_world * vert.co for vert in bpy.data.meshes['Plane'].vertices]
    vertices = np.array(vertices)
    max_x, max_y, max_z = np.max(vertices, axis=0)
    min_x, min_y, min_z = np.min(vertices, axis=0)
    scale_y = y_visibl / max_y * 1.2  # 1.2 just to make sure that it really covers the whole
    # field of fiew, even when camera is shifted
    scale_z = scale_y * res[0] / res[1]
    bpy.ops.transform.resize(value=(1, -scale_y, scale_z))
    # Emission material so the background is unaffected by scene lighting;
    # the light-path node mixes it in only for camera rays.
    plane_ob = bpy.data.objects['Plane']
    planemat = bpy.data.materials.new('PlaneMat')
    plane_ob.data.materials.append(planemat)
    planemat.use_nodes = True
    bpy.ops.object.editmode_toggle()
    bpy.ops.uv.unwrap(method='ANGLE_BASED', margin=0.001, correct_aspect=True)
    bpy.ops.object.editmode_toggle()
    tree = planemat.node_tree
    uv = tree.nodes.new('ShaderNodeTexCoord')
    texim = tree.nodes.new('ShaderNodeTexImage')
    mat = tree.nodes.new('ShaderNodeEmission')
    lpath = tree.nodes.new('ShaderNodeLightPath')
    out = tree.nodes['Material Output']
    mix = tree.nodes.new('ShaderNodeMixShader')
    if impath is not None:
        im = bpy.data.images.load(impath)
        texim.image = im
    tree.links.new(uv.outputs[0], texim.inputs[0])
    tree.links.new(texim.outputs[0], mat.inputs[0])
    tree.links.new(mat.outputs[0], mix.inputs[2])
    tree.links.new(lpath.outputs[0], mix.inputs[0])
    tree.links.new(mix.outputs[0], out.inputs[0])
    # NOTE(review): keys carry a leading space, ' min_x' is assigned twice
    # and ' max_x' is never stored — looks like a typo; confirm consumers.
    extrema = {}
    extrema[' max_z'] = max_z
    extrema[' max_y'] = max_y
    extrema[' min_z'] = min_z
    extrema[' min_y'] = min_y
    extrema[' min_x'] = min_x
    extrema[' min_x'] = min_x
    return plane_ob, texim, extrema, lower_visibility_line
# Computes rotation matrix through Rodrigues formula as in cv2.Rodrigues
def Rodrigues(rotvec):
    """Rotation matrix from an axis-angle vector (Rodrigues' formula).

    Args:
        rotvec: length-3 axis-angle vector; its norm is the rotation angle.

    Returns:
        (3, 3) rotation matrix; the identity when *rotvec* is all zeros.

    The previous version built the skew matrix from a mix of Python
    scalars and shape-(1,) numpy arrays, an inhomogeneous nested sequence
    that modern numpy (>= 1.24) rejects; building it from plain floats
    keeps the math identical and the result reliably (3, 3).
    """
    theta = np.linalg.norm(rotvec)
    if theta == 0.:
        # cos(0)*I + 0 + 0 in the formula below — return it directly.
        return np.eye(3)
    x, y, z = np.asarray(rotvec, dtype=float).ravel() / theta
    # Cross-product (skew-symmetric) matrix of the unit axis.
    skew = np.array([[0., -z, y],
                     [z, 0., -x],
                     [-y, x, 0.]])
    axis = np.array([[x], [y], [z]])
    cost = np.cos(theta)
    return (cost * np.eye(3) + (1 - cost) * axis.dot(axis.T) + np.sin(theta) * skew)
def init_scene(gender, nr=0, lower_visibility_line=None):
    """Import one SMPL-H body (person *nr*) and prepare the Blender scene.

    Loads the gendered FBX model, assigns the SH material, and — only for
    the first person (nr == 0) — deletes the default cube/lamp, places the
    camera, creates the background plane and configures the render passes.
    The armature is renamed per person and translated to a random visible
    position.  Returns (mesh object, its name, armature, camera, plane,
    plane texture node or None, lower visibility line).
    """
    ###########################################################################
    ###########################################################################
    pathhh = join(data_folder, '%s_avg_noFlatHand.fbx' % gender[0])
    print(pathhh)
    bpy.ops.import_scene.fbx(filepath=pathhh, axis_forward='X', axis_up='Z', global_scale=100)
    obname = '%s_avg' % gender[0]
    ##########################################################################
    ob = bpy.data.objects[obname]
    ob.data.use_auto_smooth = False  # autosmooth creates weird artifacts
    # assign the existing spherical harmonics material
    ob.active_material = bpy.data.materials['Material']
    # delete the default cube (which held the material)
    bpy.ops.object.select_all(action='DESELECT')
    if 'Cube' in bpy.data.objects.keys():
        bpy.data.objects['Cube'].select = True
        bpy.ops.object.delete(use_global=False)
    # delete the default lamp (SH lighting is used instead)
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['Lamp'].select = True
    bpy.ops.object.delete(use_global=False)
    cam_ob = bpy.data.objects['Camera']
    #
    scn = bpy.context.scene
    ##########################################
    # if scene is set up for the first time
    bpy.ops.object.select_all(action='DESELECT')
    if nr == 0:
        # set camera properties and initial position
        scn.objects.active = cam_ob
        # Camera distance sampled around 9 m (fixed when not randomized).
        camT_mu = 9.0
        camT_stdev = 0.1
        if RANDOM_CAM_matrix_world:
            trr = np.random.normal(camT_mu, camT_stdev)
        else:
            trr = camT_mu
        cam_y_offset = -np.random.rand(1) * trr  # * 0.5
        # Pitch the camera so it looks at the ground plane.
        alpha = math.atan((trr + 3) / -cam_y_offset)  # +3 is the plane distance!
        camera_pitch = -math.radians(180 - 90 - math.degrees(alpha))
        print(math.degrees(camera_pitch))
        cam_ob.matrix_world = Matrix(((0.0, 0.0, 1.0, trr), (0.0, -1.0, 0.0, cam_y_offset - 1),
                                      (1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))
        cam_ob.select = True
        bpy.ops.transform.rotate(value=camera_pitch, axis=(0, 0, 1))
        cam_ob.data.lens = cam_lens
        cam_ob.data.sensor_width = cam_sensor
        cam_ob.data.clip_start = 0.1
        plane_ob, texim, extrema, lower_visibility_line = create_textured_plane(cam_ob, scene, res,
                                                                                cam_distance=trr
                                                                                             + 3,
                                                                                camera_pitch=camera_pitch)
        ##########################################
        # Setup an empty object in the center which will be the parent of the Camera
        # This allows to easily rotate an object around the origin
        scn.cycles.film_transparent = True
        # Enable the render passes consumed by create_composite_nodes.
        scn.render.layers["RenderLayer"].use_pass_vector = True
        scn.render.layers["RenderLayer"].use_pass_normal = True
        scene.render.layers['RenderLayer'].use_pass_emit = True
        scene.render.layers['RenderLayer'].use_pass_emit = True
        scene.render.layers['RenderLayer'].use_pass_material_index = True
        scene.render.layers['RenderLayer'].use_pass_object_index = True
        if '2.79' in bpy.app.version_string:
            scene.render.layers['RenderLayer'].use_pass_vector = True
        # Set render size
        scn.render.resolution_x = res[0]
        scn.render.resolution_y = res[1]
        scn.render.resolution_percentage = 100
        scn.render.image_settings.file_format = 'PNG'
    else:
        plane_ob = bpy.data.objects['Plane']
    # clear existing animation data
    ob.data.shape_keys.animation_data_clear()
    arm_ob = bpy.data.objects['Armature']
    arm_ob.animation_data_clear()
    ##########################################
    # and translate it randomly on x-y plane
    bpy.ops.object.select_all(action='DESELECT')
    arm_ob.select = True
    bpy.context.scene.objects.active = arm_ob
    # Visible x interval on the ground for this camera (same math as
    # get_x_range, inlined here).
    cam_distance = plane_ob.location - cam_ob.location
    lower_visibility_line = z_rotation(cam_distance, -cam_ob.data.angle_y)
    s = cam_ob.location[1] / lower_visibility_line[1]
    x_upper_bound = cam_ob.location[0] - lower_visibility_line[0] * s
    x_range = [plane_ob.location[0], x_upper_bound]
    translation = random_placement_in_visibl_area(cam_ob, x_range, res)
    bpy.ops.transform.translate(value=(translation[0], 0, translation[1]))
    # give a new name to distinguish objects later
    ob.name = obname + '.%03d' % nr
    obname = ob.name
    for name in arm_ob.pose.bones.keys():
        bone = arm_ob.pose.bones.get(name)
        if bone is None:
            continue
        bone.name = obname + name.split('avg')[-1]
    arm_ob.name = 'Armature.%03d' % nr
    # pass_index drives the object-id render pass (one id per person).
    arm_ob.pass_index = nr + 1
    ob.pass_index = nr + 1
    if not (nr == 0):
        plane_ob = bpy.data.objects['Plane']
        return (ob, obname, arm_ob, cam_ob, plane_ob, None, lower_visibility_line)
    else:
        return (ob, obname, arm_ob, cam_ob, plane_ob, texim, lower_visibility_line)
def random_placement_in_visibl_area(cam_ob, x_range=(-3, 2), res=None):
    """Sample a random (x, y) translation that keeps an object inside the
    camera's visible area.

    Args:
        cam_ob: Blender camera object used to compute the visible y-band.
        x_range: (min, max) interval to draw the x coordinate from. Changed
            from a mutable list default to an immutable tuple; it is only
            indexed, so callers are unaffected.
        res: (width, height) render resolution in pixels — assumed non-None
            by the body (TODO confirm all callers pass it).

    Returns:
        np.ndarray of shape (2, 1) holding [x, y]; y maps to Blender's z.
    """
    translation = np.zeros((2, 1))
    translation[0] = np.random.uniform(x_range[0], x_range[1], 1)
    # y equals z in the Blender coordinate system
    visible_y_line = ray_back_projection.get_yrange(cam_ob, translation[0],
                                                    res[0], res[1])
    print(visible_y_line)
    # Shrink the visible band to 3/4 so the subject stays away from the border.
    translation[1] = np.random.uniform(visible_y_line[0] * float(3) / 4,
                                       visible_y_line[1] * float(3) / 4, 1)
    return translation
def vector_move(colliding_person, translation=None, y=None):
    """Reset an object onto the y-axis at its (or the given) height, then
    move it to the requested planar translation, updating the scene after
    each step so the new location takes effect."""
    height = colliding_person.location[1] if y is None else y
    print(colliding_person.location)
    # Step 1: park the object at x=0, z=0 keeping only the height.
    colliding_person.location = Vector((0, height, 0))
    scene.update()
    print(colliding_person.location)
    # Step 2: apply the actual translation (x and z in Blender terms).
    colliding_person.location = Vector((translation[0], height, translation[1]))
    scene.update()
    print(colliding_person.location)
# transformation between pose and blendshapes. Final choice in SMPL paper was
# a flattened version of the rotation matrix after subtracting the identity
def rodrigues2bshapes(pose, n_bones):
    """Convert axis-angle pose parameters into per-bone rotation matrices and
    the pose-blendshape weight vector (flattened R - I for all non-root bones)."""
    axis_angles = np.asarray(pose).reshape(n_bones, 3)
    mat_rots = [Rodrigues(aa) for aa in axis_angles]
    bshapes = np.concatenate(
        [(rot - np.eye(3)).ravel() for rot in mat_rots[1:]])
    return (mat_rots, bshapes)
# apply trans pose and shape to character
def apply_trans_pose_shape(trans, pose, shape, ob, arm_ob, obname, scene, cam_ob, n_bones,
                           frame=None, init_trans=Vector((0, 0, 0)), DBG_exitIfNeeded=False):
    """Apply translation, pose and shape parameters to a SMPL+H character.

    The PCA hand-pose coefficients embedded in `pose` are expanded into full
    per-joint hand rotations, bone rotations are set from the resulting
    axis-angle vector, and pose/shape blendshape keys are updated. When
    `frame` is given, keyframes are inserted for animation.

    Returns:
        init_trans (with its z component zeroed). NOTE: `init_trans` is
        mutated in place, including the shared default Vector — harmless
        today because it is always set to 0, but callers passing their own
        Vector should expect the mutation.
    """
    # set z to zero, to make sure all are translated to zero-height (y in blender)
    init_trans[2] = 0
    pose_coeffs = pose.copy()
    # Expand the PCA hand coefficients into the full 90-dof hand pose.
    # (A previous duplicate of this expression whose result was discarded,
    # plus a dead `pose = []`, have been removed.)
    full_hand_pose = pose_coeffs[body_pose_dofs:(body_pose_dofs + ncomps)].dot(
        selected_components)
    mixed_body_full_hand_pose = np.concatenate(
        (pose_coeffs[:body_pose_dofs], hands_mean + full_hand_pose))
    pose = mixed_body_full_hand_pose.copy()
    mrots, bsh = rodrigues2bshapes(pose, n_bones)
    if DBG_FBX_bone_names:
        print(arm_ob.pose.bones)
        for ii, bb in enumerate(arm_ob.pose.bones):
            print('%02d - %-16s' % (ii, bb.name))
        exit(1)
    # set the location of the first bone to the translation parameter
    arm_ob.pose.bones[obname + '_' + part_match['bone_00']].location = trans - init_trans
    if frame is not None:
        arm_ob.pose.bones[obname + '_' + part_match['root']].keyframe_insert('location',
                                                                             frame=frame)
    # Per-bone rotations from the Rodrigues matrices.
    for ibone, mrot in enumerate(mrots):
        bone = arm_ob.pose.bones[obname + '_' + part_match['bone_%02d' % ibone]]
        bone.rotation_quaternion = Matrix(mrot).to_quaternion()
        if frame is not None:
            bone.keyframe_insert('rotation_quaternion', frame=frame)
            bone.keyframe_insert('location', frame=frame)
    # apply pose blendshapes
    for ibshape, bshape in enumerate(bsh):
        ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].value = bshape
        if frame is not None:
            ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].keyframe_insert('value', index=-1,
                                                                                frame=frame)
    # apply shape blendshapes; widen slider limits so large betas are accepted
    for ibshape, shape_elem in enumerate(shape):
        ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].slider_min = -10.0
        ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].slider_max = 10.0
        ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].value = shape_elem
        if frame is not None:
            ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].keyframe_insert('value', index=-1,
                                                                                 frame=frame)
    return init_trans
def get_head_boundingbox(arm_ob, ob, render_size):
    """Project six head landmark vertices to image space and return their
    2D bounding box as {'x1', 'x2', 'y1', 'y2'}.

    NOTE(review): the `render_size` argument is immediately shadowed by a
    value recomputed from the scene settings — kept for interface
    compatibility; confirm callers pass a consistent value.
    """
    render_scale = scene.render.resolution_percentage / 100
    render_size = (int(scene.render.resolution_x * render_scale),
                   int(scene.render.resolution_y * render_scale))
    # Vertex ids: left ear, right ear, back of head, tip of nose, upper head,
    # adam's apple.
    head_bounding_box_vertices = [486, 3975, 457, 331, 411, 3050]
    mesh = ob.to_mesh(scene, True, 'PREVIEW')
    mesh_vertices = [arm_ob.matrix_world * vert.co for vert in mesh.vertices]
    bone_locations_2d = np.zeros((6, 2))
    for i, vertex in enumerate(head_bounding_box_vertices):
        co_2d = world2cam(scene, cam_ob, mesh_vertices[vertex])
        # Flip y: Blender's NDC y grows upward, image rows grow downward.
        bone_locations_2d[i, :] = [co_2d.x * render_size[0],
                                   -co_2d.y * render_size[1] + render_size[1] - 1]
    # BUGFIX: free the temporary evaluated mesh (get_upper_head already does
    # this); without it every call leaked a mesh datablock.
    bpy.data.meshes.remove(mesh)
    x_min, y_min = np.min(bone_locations_2d, axis=0)
    x_max, y_max = np.max(bone_locations_2d, axis=0)
    head_bb_dict = {'x1': x_min, 'x2': x_max, 'y1': y_min, 'y2': y_max}
    return head_bb_dict
def get_upper_head(arm_ob, ob, render_size):
    """Return the 2D (pixel) and 3D (world) location of the top-of-head vertex."""
    mesh = ob.to_mesh(scene, True, 'PREVIEW')
    world_verts = [arm_ob.matrix_world * vert.co for vert in mesh.vertices]
    top_vert = world_verts[411]  # vertex 411 sits on the top of the head
    projected = world2cam(scene, cam_ob, top_vert)
    loc_2d = (projected.x * render_size[0],
              -projected.y * render_size[1] + render_size[1] - 1)
    loc_3d = (top_vert.x, top_vert.y, top_vert.z)
    bpy.data.meshes.remove(mesh)
    return loc_2d, loc_3d
def get_bone_locs(arm_ob, ob, obname, scene, cam_ob, n_bones, UPPER_HEAD):
    """Collect 2D (pixel) and 3D (world) locations for every bone head, the
    ten fingertip bone tails, and — when UPPER_HEAD is set — an extra
    upper-head landmark stored in the last row.

    Returns:
        (bone_locations_2d, bone_locations_3d, bone_loc_names) where the
        names list parallels the filled rows; fingertip tail entries carry a
        '---- TAIL' suffix.
    """
    render_scale = scene.render.resolution_percentage / 100
    render_size = (int(scene.render.resolution_x * render_scale),
                   int(scene.render.resolution_y * render_scale))
    n_fingertips = 10
    # One extra row for the upper-head landmark when requested.
    n_rows = n_bones + n_fingertips + (1 if UPPER_HEAD else 0)
    bone_locations_2d = np.empty((n_rows, 2))
    bone_locations_3d = np.empty((n_rows, 3), dtype='float32')
    bone_loc_names = []
    idx = 0
    # Fingertip bones whose *tail* is an additional landmark (loop-invariant,
    # hoisted out of the loop).
    endEffectorNamesList = ['index2', 'middle2', 'pinky2', 'ring2', 'thumb2']
    for ibone in range(n_bones):
        bone = arm_ob.pose.bones[obname + '_' + part_match['bone_%02d' % ibone]]
        if UPPER_HEAD and part_match['bone_%02d' % ibone] == 'Head':
            bone_locations_2d[-1], bone_locations_3d[-1] = get_upper_head(arm_ob, ob, render_size)
        infoAdd = ''
        # Bone head: the joint location itself. (The original dead
        # `if True: if True:` nesting has been flattened.)
        co_2d = world2cam(scene, cam_ob, arm_ob.matrix_world * bone.head)
        co_3d = arm_ob.matrix_world * bone.head
        bone_locations_3d[idx] = (co_3d.x, co_3d.y, co_3d.z)
        bone_locations_2d[idx] = (
            co_2d.x * render_size[0], -co_2d.y * render_size[1] + render_size[1] - 1)
        bone_loc_names.append(part_match['bone_%02d' % ibone] + infoAdd)
        idx = idx + 1
        if any(pp in part_match['bone_%02d' % ibone] for pp in endEffectorNamesList):
            co_2d = world2cam(scene, cam_ob, arm_ob.matrix_world * bone.tail)
            co_3d = arm_ob.matrix_world * bone.tail
            bone_locations_3d[idx] = (co_3d.x, co_3d.y, co_3d.z)
            bone_locations_2d[idx] = (
                co_2d.x * render_size[0], -co_2d.y * render_size[1] + render_size[1] - 1)
            infoAdd = '------------------------------ TAIL'
            print('~~~ ibone %02d - %02d - %-15s' % (
                ibone, idx, part_match['bone_%02d' % ibone]), bone_locations_2d[idx],
                bone_locations_3d[idx], infoAdd)
            bone_loc_names.append(part_match['bone_%02d' % ibone] + infoAdd)
            idx = idx + 1
    return (bone_locations_2d, bone_locations_3d, bone_loc_names)
# reset the joint positions of the character according to its new shape
def reset_joint_positions(shape, ob, arm_ob, obname, scene, cam_ob, reg_ivs, joint_reg, n_bones,
                          total_pose_DOFs):
    """Regress rest-pose joint locations for the given shape (betas) and move
    the armature's edit bones onto them.

    Args:
        shape: shape (beta) vector applied to the character.
        ob: Blender mesh object of the character.
        arm_ob: Blender armature object whose edit bones are repositioned.
        obname: per-person object name prefix used in bone names.
        scene, cam_ob: active Blender scene and camera (passed through).
        reg_ivs: vertex indices used by the sparse joint regressor.
        joint_reg: sparse joint-regressor matrix over those vertices.
        n_bones: number of bones to reposition.
        total_pose_DOFs: size of the zero pose vector used to neutralize pose.

    Returns:
        The (unchanged) shape vector.
    """
    # Since the regression is sparse, only the relevant vertex
    # elements (joint_reg) and their indices (reg_ivs) are loaded.
    # Zero the pose and trans to obtain joint positions in zero pose.
    print('\n\n', 'apply_trans_pose_shape - AAA', '\n\n')
    _ = apply_trans_pose_shape(orig_trans, np.zeros(total_pose_DOFs), shape, ob, arm_ob, obname,
                               scene, cam_ob, n_bones, DBG_exitIfNeeded=False)
    # obtain a mesh after applying modifiers
    bpy.ops.wm.memory_statistics()
    me = ob.to_mesh(scene, True, 'PREVIEW')
    # Full vertex array is kept to average fingertip vertices further below.
    full_mesh = np.empty((len(me.vertices), 3))
    for ivertexx, vertexx in enumerate(me.vertices):
        full_mesh[ivertexx] = np.array(vertexx.co)
    # Only the regressor's support vertices are needed for the joint fit.
    reg_vs = np.empty((len(reg_ivs), 3))
    for iiv, iv in enumerate(reg_ivs):
        reg_vs[iiv] = me.vertices[iv].co
    bpy.data.meshes.remove(me)
    # regress joint positions in rest pose
    joint_xyz = joint_reg.dot(reg_vs)
    # Adapt joint positions in rest pose: edit bones are only reachable in
    # EDIT mode, so toggle armature visibility around the mode switch.
    arm_ob.hide = False
    bpy.ops.object.mode_set(mode='EDIT')
    arm_ob.hide = True
    for ibone in range(n_bones):
        bb = arm_ob.data.edit_bones[obname + '_' + part_match['bone_%02d' % ibone]]
        # Preserve each bone's head->tail offset while moving its head.
        bboffset = bb.tail - bb.head
        bb.head = joint_xyz[ibone]
        bb.tail = bb.head + bboffset  # Maya-Blender compatibility issue; the
        # tail is overwritten for fingertip (_2) bones just below
        boneName = part_match['bone_%02d' % ibone]
        if any(pp in boneName for pp in
               ['rring2', 'rmiddle2', 'rindex2', 'lthumb2', 'lmiddle2', 'lring2', 'rthumb2',
                'lindex2', 'lpinky2', 'rpinky2']):
            # Place the fingertip tail at the centroid of its skin vertices.
            currFingertipVerticesNP = full_mesh[fingertipVerticesDICT[boneName[:-1]]]
            bb.tail = np.average(currFingertipVerticesNP, axis=0)
    bpy.ops.object.mode_set(mode='OBJECT')
    return (shape)
# load poses and shapes
def load_body_data(smpl_data_MOCAP, smpl_data_OTHER, ob, obname, gender, i=None):
    """Load MoCap pose/translation sequences and the gendered shape space.

    Args:
        smpl_data_MOCAP: npz-like archive holding 'pose_<seq>' and
            'trans_<seq>' arrays for each sequence.
        smpl_data_OTHER: npz-like archive holding '<gender>shapes' arrays.
        ob: Blender mesh object whose 'Shape*' keys bound how many betas to keep.
        obname: unused; kept for interface compatibility.
        gender: 'male' or 'female', selects the shape space.
        i: unused; kept for interface compatibility.

    Returns:
        (cmu_parms, fshapes, cmu_frames, cmu_stepsize) keyed by sequence name.
    """
    # Build a dict keyed by sequence name with pose and trans arrays.
    cmu_parms = {}
    cmu_frames = {}
    cmu_stepsize = {}
    for seq in smpl_data_MOCAP.files:
        if not seq.startswith('pose'):
            continue
        name = seq.replace('pose_', '')
        cmu_parms[name] = {'poses': smpl_data_MOCAP[seq][:, :body_pose_dofs],
                           'trans': smpl_data_MOCAP[seq.replace('pose_', 'trans_')]}
        cmu_frames[name] = len(smpl_data_MOCAP[seq])
        # CMU sequences use a smaller frame step than the other datasets.
        # (A duplicated `= 5` assignment has been removed.)
        cmu_stepsize[name] = 5 if 'cmu' in seq else 10
    print('cmu_parms')
    for ii, kk in enumerate(cmu_parms.keys()):
        print('%03d - %-20s' % (ii, kk))
    if DBG_motion_data_filter:
        print('len(cmu_parms) = ' + str(len(cmu_parms)))
        exit(1)
    # Keep only as many betas as the mesh actually has 'Shape*' blend keys.
    n_sh_bshapes = len([k for k in ob.data.shape_keys.key_blocks.keys() if k.startswith('Shape')])
    fshapes = smpl_data_OTHER['%sshapes' % gender][:, :n_sh_bshapes]
    return (cmu_parms, fshapes, cmu_frames, cmu_stepsize)
def render_segmentation(PATH_BLENDER_segm, PATH_OUT_segm_PNG, PATH_OUT_segm_VIZ, save_dbug_imgs):
    """Load the Blender-rendered segmentation map, collapse identical RGB
    channels to one, and optionally write debug PNGs.

    Returns:
        The uint8 segmentation image (one part id per pixel).
    """
    import numpy as np
    import cv2
    segm = cv2.imread(PATH_BLENDER_segm, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    segm = np.uint8(segm)
    # If all three channels agree, keep a single channel.
    if (len(segm.shape) == 3 and np.sum(np.abs(segm[:, :, 0] - segm[:, :, 1])) == 0 and np.sum(
            np.abs(segm[:, :, 1] - segm[:, :, 2])) == 0 and np.sum(
            np.abs(segm[:, :, 2] - segm[:, :, 0])) == 0):
        segm = segm[:, :, 0]
    segm_nonzero = segm[np.nonzero(segm)]
    print('\n\n\n')
    print(segm_nonzero.shape)
    print('\n\n\n')
    # Spread label ids across 0-255 for the visualization image.
    # BUGFIX: np.int was deprecated and removed in NumPy >= 1.24; use int.
    if segm_nonzero.shape[0] > 0:
        VIZmultipl = int(np.floor(255 / np.amax(segm_nonzero)))
    else:
        VIZmultipl = bg_plane_dist_INF
    segmVIZ = segm * VIZmultipl
    # BUGFIX: was `save_dbug_Imgs` (capital I) — an undefined name that never
    # matched the parameter; use the parameter that callers actually pass.
    if save_dbug_imgs:
        cv2.imwrite(PATH_OUT_segm_PNG, segm)
        cv2.imwrite(PATH_OUT_segm_VIZ, segmVIZ)
    return segm
def depth_2_depthVIZ(PATH_depth_EXR, PATH_OUT_depth_VIZ, DBG_verbose=False, VIZ_HACK=True):
    """Convert a rendered EXR depth map into an 8-bit visualization image.

    VIZ_HACK zeroes Blender's ~1e10 'no hit' sentinel and stretches the
    remaining depth range for better contrast before writing the PNG.
    """
    depth = cv2.imread(PATH_depth_EXR, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    # BUGFIX: removed unused depth_min/depth_max locals — they were never
    # read and np.amin/np.amax raise on an all-zero depth image.
    if VIZ_HACK:
        # Blender writes ~1e10 where no geometry was hit; zero it so
        # min-max normalization is not dominated by the sentinel.
        depth[depth == 1e+10] = 0
        if DBG_verbose and depth[np.nonzero(depth)].shape[0] > 0:
            print(np.unique(np.hstack(depth[np.nonzero(depth)])))
            print(len(np.unique(np.hstack(depth[np.nonzero(depth)]))))
            print(' - VIZ HACK')
    depthVIZ = np.zeros_like(depth)
    cv2.normalize(depth, depthVIZ, alpha=0., beta=255., norm_type=cv2.NORM_MINMAX,
                  dtype=cv2.CV_32FC1)
    depthVIZ_min = 0
    depthVIZ_max = 0
    if depthVIZ[np.nonzero(depthVIZ)].shape[0] > 0:
        depthVIZ_min = np.amin(depthVIZ[np.nonzero(depthVIZ)])
        depthVIZ_max = np.amax(depthVIZ[np.nonzero(depthVIZ)])
    if DBG_verbose:
        print(depthVIZ.shape)
        print('Non-Zero:')
        print('b - depthVIZ_min =', depthVIZ_min, ' ', 'depthVIZ_max =', depthVIZ_max)
    if VIZ_HACK:
        # Subtract most of the minimum offset, clip, and re-stretch to 0..255.
        depthVIZ = np.clip(depthVIZ - depthVIZ_min * 0.80, 0., 255.)
        if DBG_verbose:
            print('depthVIZ_min * 0.98 = ', depthVIZ_min * 0.98)
        cv2.normalize(depthVIZ, depthVIZ, alpha=0., beta=255., norm_type=cv2.NORM_MINMAX,
                      dtype=cv2.CV_32FC1)
    if DBG_verbose and depthVIZ[np.nonzero(depthVIZ)].shape[0] > 0:
        depthVIZ_min = np.amin(depthVIZ[np.nonzero(depthVIZ)])
        depthVIZ_max = np.amax(depthVIZ[np.nonzero(depthVIZ)])
        print(depthVIZ.shape)
        print('Non-Zero:')
        print('c - depthVIZ_min =', depthVIZ_min, ' ', 'depthVIZ_max =', depthVIZ_max)
        print(np.unique(np.hstack(depthVIZ[np.nonzero(depthVIZ)])))
        print(len(np.unique(np.hstack(depthVIZ[np.nonzero(depthVIZ)]))))
    cv2.imwrite(PATH_OUT_depth_VIZ, depthVIZ)
def get_annorect(annolist, head_anno, img_anno, objectpos_anno, scale_anno, pose_joints_2d,
                 joints_2d_vsbl, head_bbx_dict, img_path, scale_dict):
    """Append one image's MPII-style annotations (per-person joints, head
    boxes, object positions, scales) to the running lists and return them."""
    # Reorder SMPL+H joints into the 16-joint MPII convention.
    smplToMPI = [8, 5, 2, 1, 4, 7, 0, 12, 15, 62, 21, 19, 17, 16, 18, 20]
    annorect = []
    head_bb_anno = []
    obj_pos = []
    scales = []
    for obname in joints_2d_vsbl.keys():
        joints = pose_joints_2d[obname][smplToMPI]
        visibility = joints_2d_vsbl[obname][smplToMPI]
        head_bb = head_bbx_dict[obname]
        annopoints = []
        for j in range(joints.shape[0]):
            # Keep only joints projecting inside the image bounds.
            if 0 <= joints[j, 0] <= res[0] and 0 <= joints[j, 1] <= res[1]:
                annopoints.append({'id': j, 'x': joints[j, 0],
                                   'y': joints[j, 1], 'is_visible': visibility[j]})
        annorect.append(annopoints)
        head_bb_anno.append((head_bb['x1'], head_bb['y1'], head_bb['x2'], head_bb['y2']))
        obj_pos.append((pose_joints_2d[obname][9, :]))
        scales.append(scale_dict[obname])
    annolist.append(annorect)
    head_anno.append(head_bb_anno)
    img_anno.append(img_path)
    objectpos_anno.append(obj_pos)
    scale_anno.append(scales)
    return annolist, head_anno, img_anno, objectpos_anno, scale_anno
def get_collisions(obs, obnames, arm_obs, filename, PATH_tmp_model, plane_ob, working_dir, virtualEnvDir):
    """Export every person mesh plus the ground plane to PLY, run an external
    CGAL mesh-intersection script in a virtualenv, and load its results.

    Args:
        obs, obnames: Blender mesh objects and their names.
        arm_obs: armature objects (unused here; kept for interface parity).
        filename: unused here; kept for interface parity.
        PATH_tmp_model: directory the PLY models are written to; the script's
            outputs are expected one level above it (path split on 'models').
        plane_ob: the ground-plane object.
        working_dir, virtualEnvDir: where to run the script and which
            virtualenv to activate.

    Returns:
        (intersection_per_ob, plane_collision) — per-object intersection
        counts and a boolean per-object plane-contact array.
    """
    plane_ob.select = True
    bpy.context.scene.objects.active = plane_ob
    bpy.ops.export_mesh.ply(filepath=join(PATH_tmp_model, 'plane_ob_model.ply'),
                            use_mesh_modifiers=False)
    obnameStr = ''
    for ob, obname in zip(obs, obnames):
        model_path = join(PATH_tmp_model, obname + '_model.ply')
        ob.select = True
        bpy.context.scene.objects.active = ob
        # careful, when manually exporting from blender 2.8 normals are added to faces
        # newer versions of blender might have changed the default arguments
        _ = bpy.ops.export_mesh.ply(filepath=model_path, use_uv_coords=False)
        # '+'-joined object names are parsed by the external script.
        obnameStr = obnameStr + '+' + obname
    # NOTE(review): shell=True with string-concatenated paths is fragile with
    # spaces and a command-injection risk if any path were attacker-controlled.
    callArgument = 'cd ' + working_dir + ' ; . ' + virtualEnvDir + '/bin/activate ; python ' + \
                   './meshIsect/collision_detection_cgal_meshIsect.py ' '--modelFolder=' + \
                   PATH_tmp_model + ' --obnames=' + obnameStr
    print(callArgument)
    _ = subprocess.call(callArgument, shell=True)
    # The external script writes its .npy results next to the models folder.
    PATH_tmp_intersect = PATH_tmp_model.split('models')[0]
    intersection_per_ob = np.load(join(PATH_tmp_intersect, 'intersection.npy'))
    plane_collision = np.load(join(PATH_tmp_intersect, 'plane_collision.npy'))
    remove(join(PATH_tmp_intersect, 'plane_collision.npy'))
    remove(join(PATH_tmp_intersect, 'intersection.npy'))
    plane_collision = plane_collision > 0
    print(plane_collision)
    return intersection_per_ob, plane_collision
# SMPL(+H) body-part names, in the order used for segmentation material ids.
sorted_parts = ['global', 'leftThigh', 'rightThigh', 'spine', 'leftCalf', 'rightCalf',
                'spine1', 'leftFoot', 'rightFoot', 'spine2', 'leftToes', 'rightToes', 'neck',
                'leftShoulder', 'rightShoulder', 'head', 'leftUpperArm', 'rightUpperArm',
                'leftForeArm', 'rightForeArm', 'leftHand', 'rightHand', 'lIndex0', 'lMiddle0',
                'lPinky0', 'lRing0', 'lThumb0', 'rIndex0', 'rMiddle0', 'rPinky0', 'rRing0',
                'rThumb0', 'lIndex1', 'lMiddle1', 'lPinky1', 'lRing1', 'lThumb1', 'rIndex1',
                'rMiddle1', 'rPinky1', 'rRing1', 'rThumb1', 'lIndex2', 'lMiddle2', 'lPinky2',
                'lRing2', 'lThumb2', 'rIndex2', 'rMiddle2', 'rPinky2', 'rRing2', 'rThumb2']
# Generic bone id ('bone_NN') -> SMPL+H bone name, used to build armature
# bone names as '<obname>_<bone name>'.
part_match = {'root': 'root', 'bone_00': 'Pelvis',  # OK
              'bone_01': 'L_Hip',  # OK
              'bone_02': 'R_Hip',  # OK
              'bone_03': 'Spine1',  # OK
              'bone_04': 'L_Knee',  # OK
              'bone_05': 'R_Knee',  # OK
              'bone_06': 'Spine2',  # OK
              'bone_07': 'L_Ankle',  # OK
              'bone_08': 'R_Ankle',  # OK
              'bone_09': 'Spine3',  # OK
              'bone_10': 'L_Foot',  # OK
              'bone_11': 'R_Foot',  # OK
              'bone_12': 'Neck',  # OK
              'bone_13': 'L_Collar',  # OK
              'bone_14': 'R_Collar',  # OK
              'bone_15': 'Head',  # OK
              'bone_16': 'L_Shoulder',  # OK
              'bone_17': 'R_Shoulder',  # OK
              'bone_18': 'L_Elbow',  # OK
              'bone_19': 'R_Elbow',  # OK
              'bone_20': 'L_Wrist',  # OK
              'bone_21': 'R_Wrist',  # OK
              'bone_22': 'lindex0', 'bone_23': 'lindex1', 'bone_24': 'lindex2',
              'bone_25': 'lmiddle0', 'bone_26': 'lmiddle1', 'bone_27': 'lmiddle2',
              'bone_28': 'lpinky0', 'bone_29': 'lpinky1', 'bone_30': 'lpinky2',
              'bone_31': 'lring0', 'bone_32': 'lring1', 'bone_33': 'lring2',
              'bone_34': 'lthumb0', 'bone_35': 'lthumb1', 'bone_36': 'lthumb2',
              'bone_37': 'rindex0', 'bone_38': 'rindex1', 'bone_39': 'rindex2',
              'bone_40': 'rmiddle0', 'bone_41': 'rmiddle1', 'bone_42': 'rmiddle2',
              'bone_43': 'rpinky0', 'bone_44': 'rpinky1', 'bone_45': 'rpinky2',
              'bone_46': 'rring0', 'bone_47': 'rring1', 'bone_48': 'rring2',
              'bone_49': 'rthumb0', 'bone_50': 'rthumb1', 'bone_51': 'rthumb2'}
# Part name -> 1-based segmentation label (0 is background).
part2num = {part: (ipart + 1) for ipart, part in enumerate(sorted_parts)}
flat_hand_mean = True  # when True, the mean hand pose is zeroed out
body_pose_dofs = 66  # 22 body bones * 3 axis-angle dofs
ncomps = 12  # total PCA hand components (6 per hand)
###### load hand stuff #############################################
with open('./smpl_data/mano_v1_2/models/MANO_LEFT.pkl','rb') as infile:
    manoLeft = pickle.load(infile, encoding='latin1')
hands_componentsl = manoLeft['hands_components']
hands_meanl = np.zeros(
    hands_componentsl.shape[1]) if flat_hand_mean else manoLeft['hands_mean']
hands_coeffsl = manoLeft['hands_coeffs'][:, :ncomps // 2]
with open('./smpl_data/mano_v1_2/models/MANO_RIGHT.pkl','rb') as infile:
    manoRight = pickle.load(infile, encoding='latin1')
hands_componentsr = manoRight['hands_components']
# NOTE(review): this reads the shape from the *left*-hand components
# (hands_componentsl) — presumably both hands have the same dimensionality so
# it is harmless, but confirm and consider using hands_componentsr here.
hands_meanr = np.zeros(
    hands_componentsl.shape[1]) if flat_hand_mean else manoRight['hands_mean']
hands_coeffsr = manoRight['hands_coeffs'][:, :ncomps // 2]
# Block-diagonal PCA basis: left components act on the first 45 hand dofs,
# right components on the last 45.
selected_components = np.vstack((np.hstack(
    (hands_componentsl[:ncomps // 2], np.zeros_like(hands_componentsl[:ncomps // 2]))),
    # 12, 90
    np.hstack((np.zeros_like(hands_componentsr[:ncomps // 2]),
               hands_componentsr[:ncomps // 2]))))
hands_mean = np.concatenate((hands_meanl, hands_meanr))  # 90
########################################################################
pose_coeffs = np.zeros(body_pose_dofs + selected_components.shape[0])  # 78
########################################################################
# Expand PCA hand coefficients into the full 90-dof hand pose (zero here;
# these module-level values illustrate the expansion done per-frame later).
full_hand_pose = pose_coeffs[body_pose_dofs:(body_pose_dofs + ncomps)].dot(
    selected_components)  # 90
###########################################################################
mixed_body_full_hand_pose = np.concatenate((pose_coeffs[:body_pose_dofs],
                                            hands_mean + full_hand_pose))  # 156 #
# #########################################################################
if __name__ == '__main__':
import sys
import h5py
import gzip
from pickle import load
from os import makedirs, system
logger.info('starting')
#################################################################
print(len(sys.argv))
for ii, arg in enumerate(sys.argv):
if arg == '--':
print(ii, arg, ' :)')
else:
if ii == 7 or ii == 8:
print(ii, arg, bool(int(arg)))
else:
print(ii, arg)
##########################################################################
print('\n\n')
logger.info(len(sys.argv))
if sys.argv[-2] == '--': # 0: Train
print('LOCAL SCRIPT ---> Will take into account the local TRAIN/VALID/TEST flag') #
# 1: Valid
idx = int(sys.argv[-1]) # 2: Test
elif sys.argv[-4] == '--' and sys.argv[-2] != '--':
logger.info('CLUSTER SCRIPT')
print('CLUSTER SCRIPT ---> Will overwrite the local TRAIN/VALID/TEST flag')
idx = int(sys.argv[-3])
######################################################
def split_ID_2_string(splitID):
    """Translate a numeric dataset-split id (0/1/2) into its name."""
    mapping = {0: 'train', 1: 'valid', 2: 'test'}
    return mapping.get(splitID, 'error')
######################################################
split = split_ID_2_string(int(sys.argv[-2]))
if split == 'error':
print('\n\n\n')
print('not defined - split argument')
print('\n\n\n')
exit()
######################################################
nrOfPeople = int(sys.argv[-1])
if nrOfPeople == 0:
logger.info('draw poisson sample')
nrOfPeople = np.random.poisson(10, (1,))[0]
if nrOfPeople <= 1:
nrOfPeople = 2
print('\n\n\n')
print('*** split = ', split, '***')
logger.info(str(nrOfPeople))
###################################################################
#####################################################
#####################################################
with open(fingertipVerticesPATH) as data_file:
fingertipVerticesDICT = json.load(data_file)
#####################################################
#####################################################
#####################################################
#####################################################
#####################################################
segmented_materials = True
#####################################################
#####################################################
####################################################
scene = bpy.data.scenes['Scene']
####################################################
scene.render.engine = 'CYCLES'
bpy.data.materials['Material'].use_nodes = True
scene.cycles.shading_system = True
scene.use_nodes = True
####################################################
###################################################################
###################################################################
smpl_data_MOCAP = np.load(FILE_smpl_data_MOCAP, encoding='latin1',
allow_pickle=True)
smpl_data_OTHER = np.load(FILE_smpl_data_OTHER)
###################################################################
smpl_data___joint_regressorr = smpl_data_OTHER['joint_regressor']
smpl_data___regression_verts = smpl_data_OTHER['regression_verts']
###################################################################
###################################################################
PATH_out = join(PATH_out, dataset_path_prefix, split)
PATH_tmp = join(PATH_tmp, dataset_path_prefix, split)
if not exists(PATH_out):
makedirs(PATH_out)
if not exists(PATH_tmp):
makedirs(PATH_tmp)
#######################################
#######################################
SMPLH_joint_regressorr = np.load(
join(data_folder, 'SMPLH_data_joint_regressor.npy'))
SMPLH_regression_verts = np.load(
join(data_folder, 'SMPLH_data_regression_verts.npy'))
smpl_data___joint_regressorr = SMPLH_joint_regressorr.copy()
smpl_data___regression_verts = SMPLH_regression_verts.copy()
###############################################################
# create some empty lists to store everything necessary for multi persons
obs = []
obnames = []
arm_obs = []
data = []
shapes = []
random_shapes = []
harmonics_scripts = []
materials = []
materialID_2_part = []
gender = []
cloth_img = []
cloth_img_PATHs = []
real_img = []
txt_paths = []
txt_paths_trn = []
txt_paths_val = []
txt_paths_test = []
orig_trans = []
nrOfPeople = int(nrOfPeople)
fend = np.zeros((nrOfPeople, 1))
N = np.zeros((nrOfPeople,))
random_zrot = 2 * np.pi * np.random.rand(nrOfPeople)
lower_visibility_line = None
stepsize = np.ones((nrOfPeople,), dtype=int)
dataNames = []
annolist = []
head_anno = []
image_anno = []
objectpos_anno = []
scale_anno = []
######################################################
def gender_ID_2_string(splitID):
    """Translate a numeric gender id (0/1) into its name."""
    mapping = {0: 'male', 1: 'female'}
    return mapping.get(splitID, 'error')
######################################################
beta_stds_gendered = {}
random_shapes_gendered = {}
fshapes_gendered = {}
for i in range(nrOfPeople):
gender.append(gender_ID_2_string(choice([0, 1])))
######################################################
######################################################
if gender[i] == 'error':
print('\n\n\n')
print('not defined - gender argument')
print('\n\n\n')
exit()
print('*** gender = ', gender[i], '***')
print('\n\n\n')
logger.info('init_scene')
#################################################################
if lower_visibility_line is None:
ob, obname, arm_ob, cam_ob, plane_ob, teximage, lower_visibility_line = init_scene(
gender[i], i, lower_visibility_line=None)
else:
ob, obname, arm_ob, cam_ob, plane_ob, teximage, lower_visibility_line = init_scene(
gender[i], i, lower_visibility_line=lower_visibility_line)
obs.append(ob)
obnames.append(obname)
arm_obs.append(arm_ob)
if not (teximage is None):
texim = teximage
####################################
intrinsic = cam_compute_intrinsic()
####################################
split_proportion_TRAIN = 0.8
split_proportion_VALID = 0.1
split_proportion_TESTT = 0.1
assert (split_proportion_TRAIN + split_proportion_VALID + split_proportion_TESTT) == 1.0
logger.info('load body data')
#################################################################################################
cmu_parms, fshapes, cmu_frames, cmu_stepsize = load_body_data(smpl_data_MOCAP, smpl_data_OTHER,
ob, obname, gender=gender[i],
i=i)
shuffle_idx = np.load(
'./resources/random_mocap_order.npy')
shuffle_idx = np.array(shuffle_idx, dtype=int)
#################################################################################################
fshapes_NUMB = fshapes.shape[0]
xxxShapesTRN = int(round(fshapes_NUMB * (split_proportion_TRAIN)))
xxxShapesVLD = int(round(fshapes_NUMB * (split_proportion_TRAIN + split_proportion_VALID)))
if split == 'train':
fshapes_gendered[gender[i]] = fshapes[:xxxShapesTRN, :]
elif split == 'valid':
fshapes_gendered[gender[i]] = fshapes[xxxShapesTRN:xxxShapesVLD, :]
elif split == 'test':
fshapes_gendered[gender[i]] = fshapes[xxxShapesVLD:,
:]
else:
print('\n\n\n')
print('not defined - split !@#')
print('\n\n\n')
exit()
shuffle_idx = np.load('./resources/random_mocap_order_'+mocapDataName+'.npy')
shuffle_idx = np.array(shuffle_idx, dtype=int)
cmu_parms_keyz = np.array(sorted(cmu_parms.keys()))[shuffle_idx]
cmu_parms_NUMB = len(cmu_parms_keyz)
#
xxxCmuTRN = int(round(cmu_parms_NUMB * (split_proportion_TRAIN)))
xxxCmuVLD = int(round(cmu_parms_NUMB * (split_proportion_TRAIN + split_proportion_VALID)))
if split == 'train':
names = cmu_parms_keyz[:xxxCmuTRN]
elif split == 'valid':
names = cmu_parms_keyz[xxxCmuTRN:xxxCmuVLD]
elif split == 'test':
names = cmu_parms_keyz[xxxCmuVLD:]
else:
print('\n\n\n')
print('not defined - split !@#')
print('\n\n\n')
exit()
#
if i == 0:
name = sorted(names)[idx % len(names)]
filename = str(idx) # name # could also be just a number
else:
# sample cmu_mocap_sequence with probability = frames_m / frames_total
frame_ar = np.array([cmu_frames[key] / cmu_stepsize[key] for key in names])
csum = np.cumsum(frame_ar / np.sum(frame_ar))
dice = np.random.rand(1)
rand_seq = np.argmax(csum > dice) # get first occurrence of this condition
name = names[rand_seq]
dataNames.append(name)
if DBG_print_MOCAP_keys:
for idd, kk in enumerate(sorted(cmu_parms.keys())):
print('%04d - %-20s' % (idd, kk))
print(' ')
print('gender = %s' % [i])
print('len(cmu_parms) = %s' % len(cmu_parms))
print('idx = %s' % idx)
print('idx %% len = %s' % (idx % len(cmu_parms)))
print('\n\n\n')
print('DBG_print_MOCAP_keys = True --> Exiting now !!!')
print('\n\n\n')
exit(1)
###############################################################################################################
###############################################################################################################
###############################################
###############################################
hand_poses = load_hand_poses(PATH_MoCap_SMPLH)
###############################################
###############################################
hand_poses_NUMB = len(hand_poses)
#
xxxHPosesTRN = int(round(hand_poses_NUMB * (split_proportion_TRAIN)))
xxxHPosesVLD = int(round(hand_poses_NUMB * (split_proportion_TRAIN + split_proportion_VALID)))
if split == 'train': # synth flow - PATHs
hand_poses = hand_poses[:xxxHPosesTRN]
elif split == 'valid':
hand_poses = hand_poses[xxxHPosesTRN:xxxHPosesVLD]
elif split == 'test':
hand_poses = hand_poses[xxxHPosesVLD:]
else:
print('\n\n\n')
print('not defined - split @#$')
print('\n\n\n')
exit()
###############################################
###############################################
lsun_path = './resources/sun397_' + split + '.txt'
#####################################################
#####################################################
bg_paths = []
with open(lsun_path) as f:
for line in f:
bg_paths.append(join(lsun_base_path, line.strip()))
#####################################################
if RANDOM_BG:
PATH_bg = choice(bg_paths)
else:
PATH_bg = bg_paths[0]
#####################################################
bg_img = bpy.data.images.load(PATH_bg)
#####################################################
PATH_tmp_models = join(PATH_tmp, filename, 'models')
print(PATH_tmp_models)
if not exists(PATH_tmp_models):
makedirs(PATH_tmp_models)
else:
fileList = listdir(PATH_tmp_models)
for fi in fileList:
remove(join(PATH_tmp_models, fi))
caesar_txt_paths = sorted(glob(
(PATHs_texture_participants.replace('<CEASAR>', 'grey')).replace('<GENDER>', gender[i])))
noncaesar_txt_paths = sorted(glob(
(PATHs_texture_participants.replace('<CEASAR>', 'nongrey')).replace('<GENDER>',
gender[i])))
#
ncaesar = len(caesar_txt_paths)
nnoncaesar = len(noncaesar_txt_paths)
print(noncaesar_txt_paths)
#
xxxCeasarTRN = int(round(ncaesar * (split_proportion_TRAIN)))
xxxCeasarVLD = int(round(ncaesar * (split_proportion_TRAIN + split_proportion_VALID)))
#
xxxNonCeasarTRN = int(round(nnoncaesar * (split_proportion_TRAIN)))
xxxNonCeasarVLD = int(round(nnoncaesar * (split_proportion_TRAIN + split_proportion_VALID)))
#
print(xxxCeasarTRN)
print(xxxCeasarVLD)
print(xxxNonCeasarTRN)
print(xxxNonCeasarVLD)
print('----')
print(ncaesar)
print(nnoncaesar)
non_ceasar_ceasar_ratio = 8
if ncaesar >= nnoncaesar:
if split == 'train':
txt_paths.append(caesar_txt_paths[:xxxCeasarTRN] + (
non_ceasar_ceasar_ratio * (ncaesar // nnoncaesar) * noncaesar_txt_paths[
:xxxNonCeasarTRN]))
elif split == 'valid':
txt_paths.append(caesar_txt_paths[xxxCeasarTRN:xxxCeasarVLD] + (
non_ceasar_ceasar_ratio * (ncaesar // nnoncaesar) * noncaesar_txt_paths[
xxxNonCeasarTRN:xxxNonCeasarVLD]))
elif split == 'test':
txt_paths.append(caesar_txt_paths[xxxCeasarVLD:] + (
non_ceasar_ceasar_ratio * (ncaesar // nnoncaesar) * noncaesar_txt_paths[
xxxNonCeasarVLD:]))
else:
print('\n\n\n')
print('not defined - split $%^')
print('\n\n\n')
exit()
else:
if split == 'train':
print(len(int(np.round(
1 / non_ceasar_ceasar_ratio * (nnoncaesar // ncaesar))) * caesar_txt_paths[
:xxxCeasarTRN]))
print(len(noncaesar_txt_paths[:xxxNonCeasarTRN]))
txt_paths.append(int(np.round(
1 / non_ceasar_ceasar_ratio * (nnoncaesar // ncaesar))) * caesar_txt_paths[:xxxCeasarTRN] + noncaesar_txt_paths[:xxxNonCeasarTRN])
elif split == 'valid':
txt_paths.append(int(np.round(
1 / non_ceasar_ceasar_ratio * (nnoncaesar // ncaesar))) * caesar_txt_paths[xxxCeasarTRN:xxxCeasarVLD] + noncaesar_txt_paths[xxxNonCeasarTRN:xxxNonCeasarVLD])
elif split == 'test':
txt_paths.append(int(np.round(
1 / non_ceasar_ceasar_ratio * (nnoncaesar // ncaesar))) * caesar_txt_paths[xxxCeasarVLD:] + noncaesar_txt_paths[xxxNonCeasarVLD:])
else:
print('\n\n\n')
print('not defined - split $%^')
print('\n\n\n')
exit()
print('---final textures')
####################################################
# PUT INITIAL CLOTHING
cloth_img_name = choice(txt_paths[i])
cloth_img.append(bpy.data.images.load(cloth_img_name))
####################################################
# grab random textures from existing participants
real_img.append(cloth_img[i])
# this texture holds the part segmentation.
# Ideally it should be directly done with face colors
####################################################
####################################################
####################################################
mat_tree = bpy.data.materials['Material'].node_tree
####################################################
# Create copy-spher.harm. directory if not exist
####################################################
PATH_OUT_sh = join(PATH_out, filename, 'spher_harm', 'sh.osl')
####################################################
if not exists(dirname(PATH_OUT_sh)):
makedirs(dirname(PATH_OUT_sh))
system('cp %s %s' % (FILE_sh_original.replace(' ', '\ '), PATH_OUT_sh.replace(' ', '\ ')))
####################################################
create_sh_material(mat_tree, PATH_OUT_sh, img=real_img[i])
####################################################
res_paths = create_composite_nodes(scene.node_tree, filename, img=bg_img)
####################################################
setState0()
ob.select = True
bpy.context.scene.objects.active = ob
#
# create material segmentation
if segmented_materials:
mat, matID_2_part = create_segmentation(ob, i)
materials.append(mat)
materialID_2_part.append(matID_2_part)
#
prob_dressed = {'global': .01, 'leftThigh': .9, 'rightThigh': .9, 'spine': .9,
'leftCalf': .5, 'rightCalf': .5, 'spine1': .9, 'leftFoot': .9,
'rightFoot': .9, 'spine2': .9, 'leftToes': .9, 'rightToes': .9,
'neck': .01, 'leftShoulder': .8, 'rightShoulder': .8, 'head': .01,
'leftUpperArm': .5, 'rightUpperArm': .5, 'leftForeArm': .5,
'rightForeArm': .5, 'leftHand': .01, 'rightHand': .01, 'lIndex0': .01,
'lMiddle0': .01, 'lPinky0': .01, 'lRing0': .01, 'lThumb0': .01,
'rIndex0': .01, 'rMiddle0': .01, 'rPinky0': .01, 'rRing0': .01,
'rThumb0': .01, 'lIndex1': .01, 'lMiddle1': .01, 'lPinky1': .01,
'lRing1': .01, 'lThumb1': .01, 'rIndex1': .01, 'rMiddle1': .01,
'rPinky1': .01, 'rRing1': .01, 'rThumb1': .01, 'lIndex2': .01,
'lMiddle2': .01, 'lPinky2': .01, 'lRing2': .01, 'lThumb2': .01,
'rIndex2': .01, 'rMiddle2': .01, 'rPinky2': .01, 'rRing2': .01,
'rThumb2': .01}
else:
materials.append({'FullBody': bpy.data.materials['Material']})
prob_dressed = {'FullBody': 1.}
##########################################################################################################################
orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[
obname + '_' + part_match['bone_00']].head.copy()) - Vector((-1., 1., 1.))
##########################################################################################################################
orig_cam_loc = cam_ob.location.copy()
##########################################################################################################################
###################################################################
###################################################################
beta_stds_gendered[gender[i]] = np.load(
join(data_folder, ('%s_beta_stds.npy' % gender[i]))) #
####################################################
####################################################
random_shapes_gendered[gender[i]] = []
random_shapes_gendered[gender[i]].append(
lambda std: np.concatenate((np.random.uniform(-std, std,
size=(shape_ndofs,)) *
beta_stds_gendered[gender[i]][:shape_ndofs],
np.zeros(shape_totalCoeffs - shape_ndofs)))) # 64
scene.objects.active = arm_obs[i]
orig_trans = np.asarray(
arm_obs[i].pose.bones[obname + '_' + part_match['bone_00']].location).copy()
# The spherical harmonics material needs a script to be loaded
scs = []
for mname, material in materials[i].items():
scs.append(material.node_tree.nodes['Script'])
scs[-1].filepath = PATH_OUT_sh
scs[-1].update()
harmonics_scripts.append(scs)
flip_coin = lambda prob: np.random.rand() < prob
data.append(cmu_parms[name])
stepsize[i] = int(cmu_stepsize[name]) * stepsizeFactor
N[i] = len(data[i]['poses'][::stepsize[i]])
#####################################################
get_real_frame = lambda ifr: ifr # ifr*stepsize
random_camera_trans = Vector((0., 0., 0.))
random_camera_rotX = 0
random_camera_rotY = 0
random_camera_rotZ = 0
random_camera_rotX_ACCUM = 0.
random_camera_rotY_ACCUM = 0.
random_camera_rotZ_ACCUM = 0.
reset_loc = False
flip_coin_perCHUNK = False ############################################################
# ############################################################
# ############################################################
# create a sequence of frames_per_shape with a single shape and rotation, with keyframes #
# keyframe animation is important to obtain the flow
min_N_idx = np.argmin(N)
min_N = N[min_N_idx]
ishapes = range(int(np.ceil(float(min_N) / float(frames_per_shape))))
fbegin = np.zeros((len(ishapes), nrOfPeople))
fend = np.zeros((len(ishapes), nrOfPeople))
reset_loc = np.zeros((nrOfPeople, 1), dtype=bool)
init_trans = []
rand_start_frame = np.zeros((nrOfPeople,))
for i, n in enumerate(N):
if n == min_N:
rand_start_frame[i] = 0
else:
rand_start_frame[i] = np.random.randint(0, n - min_N)
print(N)
print(dataNames)
############################################################
logger.info('start ishapes loop')
for ishape in ishapes:
for i in range(nrOfPeople):
bpy.ops.object.select_all(action='DESELECT')
arm_obs[i].select = True
bpy.context.scene.objects.active = arm_obs[i]
if RANDOM_SHAPE_per_CHUNK:
if i == 0:
shapes = []
shapes.append(
set_shape(RANDOM_SHAPE, RANDOM_SHAPE_mode, i, random_shapes_gendered, gender[i],
fshapes_gendered))
#####################################################
curr_shape = reset_joint_positions(shapes[i], obs[i], arm_obs[i], obnames[i], scene,
cam_ob, smpl_data___regression_verts,
smpl_data___joint_regressorr, n_bones, total_pose_DOFs)
if RANDOM_ROT_Z:
random_zrot[i] = 2 * np.pi * np.random.rand() # 0...1
bpy.ops.object.select_all(action='DESELECT')
def degree2rad(deg):
rad = deg * np.pi / 180.
return rad
if RANDOM_CAMERA_JITTER:
flip_coin_perCHUNK = flip_coin(.3)
random_camera_trans = .005 * np.random.randn(3)
if RANDOM_POSITION_perCHUNCK and not (ishape == ishapes[0]):
x_range = get_x_range(cam_ob, plane_ob)
for person in arm_obs:
bpy.ops.object.select_all(action='DESELECT')
person.select = True
bpy.context.scene.objects.active = person
translation = random_placement_in_visibl_area(cam_ob, x_range, res)
vector_move(person, translation)
for i in range(nrOfPeople):
if N[i] == min_N:
fbegin[ishape, i] = ishape * stepsize[i] * frames_per_shape
fend[ishape, i] = min((ishape + 1) * stepsize[i] * frames_per_shape,
len(data[i]['poses']))
else:
fbegin[ishape, i] = ishape * stepsize[i] * frames_per_shape + rand_start_frame[i]
fend[ishape, i] = (ishape + 1) * stepsize[i] * frames_per_shape + rand_start_frame[i]
for arm_ob in arm_obs:
arm_ob.animation_data_clear()
cam_ob.animation_data_clear()
for i in range(nrOfPeople):
if RANDOM_TXT_per_CHUNK:
cloth_img[i], cloth_img_PATH = set_txt(RANDOM_TXT, cloth_img[i], txt_paths[i], i)
cloth_img_PATHs.append(cloth_img_PATH)
if RANDOM_BG_per_CHUNK:
bg_img, bg_img_PATH = set_background(RANDOM_BG, bg_img, idx)
if RANDOM_LIGHTS_per_CHUNK:
sh_coeffs = set_lights(RANDOM_LIGHTS)
#############################################
#############################################
blur = scene.node_tree.nodes['Blur']
if RANDOM_Pxl_BLUR_SIZE:
blur.size_x = int(np.round(1.0 * np.random.randn()))
blur.size_y = int(np.round(1.0 * np.random.randn()))
else:
blur.size_x = 0.
blur.size_y = 0.
##########################################################################
##########################################################################
vblur_factor = np.random.normal(vblur_factor_Miu, vblur_factor_Std)
if not USE_MOTION_BLUR:
vblur_factor = 0.
##########################################################################
scene.node_tree.nodes['Vector Blur'].factor = vblur_factor
##########################################################################
##########################################################################
allHandsOnly_poses = []
handsOnly_poses = []
for personIDX in range(nrOfPeople):
handSeqID = choice(range(len(hand_poses)))
lennn = len(hand_poses[handSeqID])
sampledHandFrameIDs = range(lennn)[::stepsize_hands]
startSamplingFrID = choice(range(len(sampledHandFrameIDs) - frames_per_shape + 1))
#
for sampledHandFrameID in sampledHandFrameIDs:
if RANDOM_HAND_POSE:
curr_hand_pose = hand_poses[handSeqID][sampledHandFrameID]
else:
handSeqID = 0
curr_hand_pose = hand_poses[handSeqID][0]
if DBG_MODE_ENFORCE_FLAT_HAND:
curr_hand_pose = np.zeros_like(hand_poses[0][0])
#
handsOnly_poses.append(curr_hand_pose)
allHandsOnly_poses.append(handsOnly_poses)
###########################
STORED_ITER_seq_frame = []
STORED_ITER_pose = []
STORED_ITER_trans = []
###########################
N_min = np.min(N)
mins = np.zeros((nrOfPeople,))
for i in range(nrOfPeople):
mins[i] = len(np.arange(fbegin[ishape, i], fend[ishape, i], stepsize[i]))
N_min = min(mins)
###########################
print('done with allocating - searching for a valid configuration without collisions')
print(fbegin)
print(fend)
print(stepsize)
print(mins)
###########################
collision = True
# repeat until configuration without collisions is found
combs = np.linspace(0, nrOfPeople - 1, nrOfPeople, dtype=int)
pairs = list(combinations(combs, 2))
logger.info('start collision free assembling')
while collision:
logger.info('not found yet')
collisions_per_pair = np.zeros((len(pairs),), dtype=int)
plane_collision = np.zeros((nrOfPeople,), dtype=int)
STORED_ITER_seq_frame_temp = []
STORED_ITER_pose_temp = []
STORED_ITER_trans_temp = []
for seq_frame in range(int(N_min)):
# get empty lists for collision detection of meshes
meshes = []
mesh_vertices = []
start_frame = np.zeros((len(arm_obs),), dtype=int)
pose_list = []
trans_list = []
print('seqFrame=' + str(seq_frame))
###############################################
iframe = seq_frame + ishape * frames_per_shape
###############################################
scene.frame_set(get_real_frame(seq_frame))
###############################################
for i, (arm_ob, pose_handsOnly) in enumerate(zip(arm_obs, allHandsOnly_poses)):
bpy.ops.object.select_all(action='DESELECT')
arm_ob.select = True
bpy.context.scene.objects.active = arm_ob
obs[i].select = True
bpy.context.scene.objects.active = obs[i]
pose_handsOnly = np.array(pose_handsOnly[i])
pose_list.append(
data[i]['poses'][int(fbegin[ishape, i]):int(fend[ishape, i]):stepsize[i]][
seq_frame])
trans_list.append(
data[i]['trans'][int(fbegin[ishape, i]):int(fend[ishape, i]):stepsize[i]][
seq_frame])
if len(pose_list[i]) < total_pose_DOFs:
pose_list[i] = np.hstack((pose_list[i][:body_pose_dofs], pose_handsOnly))
assert (pose_list[i].shape[0] == body_pose_dofs + ncomps)
if DBG_MODE_ENFORCE_POSE_ZERO:
pose_list[i][3:] = 0
if seq_frame == 0:
init_trans.append(Vector(trans_list[i]) + arm_ob.pose.bones[0].tail)
init_trans[i] = apply_trans_pose_shape(Vector(trans_list[i]), pose_list[i],
shapes[i], obs[i], arm_obs[i], obnames[i],
scene, cam_ob, n_bones,
get_real_frame(seq_frame), init_trans[i],
True)
###############################
arm_obs[i].pose.bones[
obnames[i] + '_' + part_match['root']].rotation_quaternion = Quaternion(
Euler((0, 0, random_zrot[i]), 'XYZ'))
arm_obs[i].pose.bones[obnames[i] + '_' + part_match['root']].keyframe_insert(
'rotation_quaternion', frame=get_real_frame(seq_frame))
meshes.append(obs[i].to_mesh(scene, True, 'PREVIEW'))
mesh_vertices.append(
[obs[i].matrix_world * vert.co for vert in meshes[i].vertices])
scene.update()
if RANDOM_CAMERA_JITTER and flip_coin_perCHUNK:
cam_ob_origloc = cam_ob.location.copy()
cam_ob.location = cam_ob_origloc + Vector(random_camera_trans)
cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
cam_ob.location = cam_ob_origloc
STORED_ITER_seq_frame_temp.append(seq_frame)
STORED_ITER_pose_temp.append(pose_list)
STORED_ITER_trans_temp.append(trans_list)
#########################################
#########################################
# check for collisions
x_range = get_x_range(cam_ob, plane_ob)
scene.update()
_, _, plane_collision_seq = collisionDetector.mesh_collision(
mesh_vertices, meshes,pairs, min_max_x=(x_range[0], x_range[1]))
plane_collision_seq = np.squeeze(plane_collision_seq)
collisions_per_ob, _ = get_collisions(obs, obnames, arm_obs, filename, PATH_tmp_models,
plane_ob, working_dir, collisionVirtualenv)
collisions_per_pair += np.array(collisions_per_ob, dtype=int)
print(plane_collision)
plane_collision += np.array(plane_collision_seq)
print(collisions_per_ob)
# delete all meshes created for collision detection
for mesh in meshes:
bpy.data.meshes.remove(mesh)
#########################################
collision_count = np.sum(collisions_per_pair)
#########################################
        # Resolve collisions: re-place people if any person-person collision or any
        # person-background-plane collision was detected during this animation pass.
if collision_count > 0 or any(plane_collision):
for seq_frame in range(int(N_min)):
scene.frame_set(get_real_frame(seq_frame))
cam_ob.animation_data_clear()
for arm_ob in arm_obs:
arm_ob.animation_data_clear()
######################################
################################
# loop over pairs of people and replace them until no more collisions from last
# animation
while (collision_count > 0 or any(plane_collision)):
print(collisions_per_pair)
# select one person from pair with largest number of collisions
if any(plane_collision):
for j, pl_col in enumerate(plane_collision):
if pl_col:
max_idx = j
else:
pair_idx = np.argmax(collisions_per_pair)
max_idx = pairs[pair_idx][np.random.randint(0, 2)]
colliding_person = arm_obs[max_idx]
bpy.ops.object.select_all(action='DESELECT')
colliding_person.select = True
bpy.context.scene.objects.active = colliding_person
# set new initial pose to a new random position in the visible area
x_range = get_x_range(cam_ob, plane_ob)
translation = random_placement_in_visibl_area(cam_ob, x_range, res)
vector_move(colliding_person, translation)
print(colliding_person.name)
################################
# update collision counts
for j, tpl in enumerate(pairs):
if max_idx in tpl:
collision_count -= collisions_per_pair[j]
collisions_per_pair[j] = 0
plane_collision[max_idx] = 0
else:
collision = False
# add the data from the valid sequence to the lists to be rendered,
for temp_frame, temp_pose, temp_trans in zip(STORED_ITER_seq_frame_temp, STORED_ITER_pose_temp,
STORED_ITER_trans_temp):
STORED_ITER_seq_frame.append(temp_frame)
STORED_ITER_pose.append(temp_pose)
STORED_ITER_trans.append(temp_trans)
#########################################
print(STORED_ITER_seq_frame)
#####################################################
#####################################################
# iterate over the keyframes (here set manually) and render
for _idx_ in range(len(STORED_ITER_seq_frame)):
################################################### ***
seq_frame = STORED_ITER_seq_frame[_idx_]
pose = STORED_ITER_pose[_idx_]
trans = STORED_ITER_trans[_idx_]
###############################################
iframe = seq_frame + ishape * frames_per_shape
###############################################
scene.frame_set(get_real_frame(seq_frame))
###############################################
#####################################################
PATH_INN_depth_EXR = join(res_paths['depth'], '%05d.exr' % (seq_frame))
PATH_OUT_depth_EXR = join(PATH_out, join(filename, 'depth_EXR', '%05d.exr' % (iframe)))
PATH_INN_normals_EXR = join(res_paths['normal'], '%05d.exr' % (seq_frame))
PATH_OUT_normals_EXR = join(PATH_out, join(filename, 'normal_EXR', '%05d.exr' % (iframe)))
PATH_OUT_normals_VIZ = join(PATH_out, join(filename, 'normal_VIZ', '%05d.png' % (iframe)))
PATH_INN_flow_EXR = join(res_paths['flow'], '%05d.exr' % (seq_frame))
PATH_OUT_flow_FLOW_fake = join(PATH_out,
join(filename, 'flow_fake', '%05d.flo' % (iframe)))
PATH_OUT_flow_FLOW_real = join(PATH_out, join(filename, 'flow', '%05d.flo' % (iframe)))
PATH_OUT_flow_VIZ = join(PATH_out, join(filename, 'flow_VIZ', '%05d.png' % (iframe)))
PATH_INN_segm_EXR = join(res_paths['segm'], '%05d.exr' % (seq_frame))
PATH_OUT_segm_EXR = join(PATH_out, join(filename, 'segm_EXR', '%05d.exr' % (iframe)))
PATH_INN_objectID_EXR = join(res_paths['object_Id'], '%05d.exr' % (seq_frame))
PATH_OUT_objectID_EXR = join(PATH_out,
join(filename, 'objectId_EXR', '%05d.exr' % (iframe)))
PATH_OUT_objectID_obname = join(PATH_out,
join(filename, 'objectId_obname', '%05d.exr' % (iframe)))
PATH_OUT_depth_VIZ = join(PATH_out, join(filename, 'depth_VIZ', '%05d.png' % (iframe)))
PATH_OUT_segm_PNG = join(PATH_out, join(filename, 'segm_PNG', '%05d.png' % (iframe)))
PATH_OUT_segm_VIZ = join(PATH_out, join(filename, 'segm_VIZ',
'%05d.png' % (iframe))) # (name, iframe)))
PATH_OUT_full_textBG = join(PATH_out, join(filename, 'composition',
'%05d.png' % (iframe))) # (name, iframe)))
PATH_OUT_pose_joints_2d = join(PATH_out, join(filename, 'pose_joints_2d',
'%05d.npy' % (iframe))) # (name, iframe)))
PATH_OUT_pose_joints_2d_VIZ = join(PATH_out, join(filename, 'pose_joints_2d_VIZ',
'%05d.png' % (iframe)))
PATH_OUT_pose_joints_3d = join(PATH_out, join(filename, 'pose_joints_3d',
'%05d.npy' % (iframe))) # (name, iframe)))
PATH_OUT_pose_joints_3d_VIZ = join(PATH_out, join(filename, 'pose_joints_3d_VIZ',
'%05d.png' % (
iframe))) # (name, iframe)))
PATH_OUT_pose_joints_VSBL = join(PATH_out, join(filename, 'pose_joints_VSBL',
'%05d.npy' % (iframe))) # (name, iframe)))
PATH_OUT_shape = join(PATH_out, join(filename, 'shapes',
'%05d.npy' % (iframe))) # (name, iframe)))
PATH_OUT_pose_fullPCA = join(PATH_out, join(filename, 'pose_coeffs_fullPCA',
'%05d.npy' % (iframe))) # (name, iframe)))
PATH_OUT_pose_fullFull = join(PATH_out, join(filename, 'pose_coeffs_fullFull',
'%05d.npy' % (iframe))) # (name, iframe)))
PATH_OUT_BB_handsBody_crn = join(PATH_out, join(filename, 'cropHandsBody_BBs',
'%05d.npy' % (iframe)))
PATH_OUT_gender = join(PATH_out, join(filename, 'gender', '%05d.txt' % (iframe)))
PATH_OUT_HandVisibilityLR = join(PATH_out,
join(filename, 'hand_visibility', '%05d.npy' % (iframe)))
PATH_OUT_DBG = join(PATH_out, join(filename, 'azzzDBG', '%05d.txt' % (iframe)))
PATH_OUT_Cam_RT_4x4 = join(PATH_out,
join(filename, 'camera_RT_4x4', '%05d.npy' % (iframe)))
PATH_OUT_Cam_RT_4x4_txt = join(PATH_out,
join(filename, 'camera_RT_4x4', '%05d.txt' % (iframe)))
PATH_OUT_Subj_Pelvis_T = join(PATH_out,
join(filename, 'subj_pelvis_T', '%05d.npy' % (iframe)))
PATH_OUT_Subj_ZRot = join(PATH_out, join(filename, 'subj_ZRot', '%05d.npy' % (iframe)))
PATH_OUT_BLUR_parms = join(PATH_out, join(filename, 'blur_parms', '%05d.npy' % (iframe)))
PATH_OUT_BLUR_parmsTxt = join(PATH_out,
join(filename, 'blur_parms', '%05d.txt' % (iframe)))
PATH_OUT_bg_img_PATH = join(PATH_out,
join(filename, 'img_path_BGround', '%05d.txt' % (iframe)))
PATH_OUT_txt_img_PATH = join(PATH_out,
join(filename, 'img_path_Texture', '%05d.txt' % (iframe)))
PATH_OUT_scale = join(PATH_out, join(filename, 'scale', '%05d' % (iframe)))
PATH_OUT_bbx_head = join(PATH_out, join(filename, 'bbx_head', '%05d' % (iframe)))
PATH_OUT_bbx_head_img = join(PATH_out, join(filename, 'bbx_head', '%05d' % (iframe)))
PATH_OUT_ANNOLIST = join(PATH_out, join(filename, 'annolist'))
PATH_OUT_ANNOLIST_HEAD = join(PATH_out, join(filename, 'annolist_head'))
PATH_OUT_ANNOLIST_img = join(PATH_out, join(filename, 'annolist_img'))
PATH_OUT_ANNOLIST_objpos = join(PATH_out, join(filename, 'annolist_objpos'))
PATH_OUT_ANNOLIST_scale = join(PATH_out, join(filename, 'annolist_scale'))
#
if not exists(dirname(PATH_OUT_BB_handsBody_crn)):
makedirs(dirname(PATH_OUT_BB_handsBody_crn))
if not exists(dirname(PATH_OUT_full_textBG)):
makedirs(dirname(PATH_OUT_full_textBG))
if not exists(dirname(PATH_OUT_pose_joints_2d)):
makedirs(dirname(PATH_OUT_pose_joints_2d))
if not exists(dirname(PATH_OUT_pose_joints_2d_VIZ)):
makedirs(dirname(PATH_OUT_pose_joints_2d_VIZ))
if not exists(dirname(PATH_OUT_pose_joints_3d)):
makedirs(dirname(PATH_OUT_pose_joints_3d))
if not exists(dirname(PATH_OUT_pose_joints_3d_VIZ)):
makedirs(dirname(PATH_OUT_pose_joints_3d_VIZ))
if not exists(dirname(PATH_OUT_pose_joints_VSBL)):
makedirs(dirname(PATH_OUT_pose_joints_VSBL))
if not exists(dirname(PATH_OUT_segm_PNG)):
makedirs(dirname(PATH_OUT_segm_PNG))
if not exists(dirname(PATH_OUT_segm_VIZ)):
makedirs(dirname(PATH_OUT_segm_VIZ))
if not exists(dirname(PATH_OUT_depth_VIZ)):
makedirs(dirname(PATH_OUT_depth_VIZ))
if not exists(dirname(PATH_OUT_shape)):
makedirs(dirname(PATH_OUT_shape))
if not exists(dirname(PATH_OUT_pose_fullPCA)):
makedirs(dirname(PATH_OUT_pose_fullPCA))
if not exists(dirname(PATH_OUT_pose_fullFull)):
makedirs(dirname(PATH_OUT_pose_fullFull))
if not exists(dirname(PATH_OUT_depth_EXR)):
makedirs(dirname(PATH_OUT_depth_EXR))
if not exists(dirname(PATH_OUT_normals_EXR)):
makedirs(dirname(PATH_OUT_normals_EXR))
if not exists(dirname(PATH_OUT_normals_VIZ)):
makedirs(dirname(PATH_OUT_normals_VIZ))
if not exists(dirname(PATH_OUT_flow_FLOW_fake)):
makedirs(dirname(PATH_OUT_flow_FLOW_fake))
if not exists(dirname(PATH_OUT_flow_FLOW_real)):
makedirs(dirname(PATH_OUT_flow_FLOW_real))
if not exists(dirname(PATH_OUT_flow_VIZ)):
makedirs(dirname(PATH_OUT_flow_VIZ))
if not exists(dirname(PATH_OUT_segm_EXR)):
makedirs(dirname(PATH_OUT_segm_EXR))
if not exists(dirname(PATH_OUT_objectID_EXR)):
makedirs(dirname(PATH_OUT_objectID_EXR))
if not exists(dirname(PATH_OUT_objectID_obname)):
makedirs(dirname(PATH_OUT_objectID_obname))
if not exists(dirname(PATH_OUT_gender)):
makedirs(dirname(PATH_OUT_gender))
if not exists(dirname(PATH_OUT_HandVisibilityLR)):
makedirs(dirname(PATH_OUT_HandVisibilityLR))
if not exists(dirname(PATH_OUT_DBG)) and DBG_FLAG_writeOnDisk:
makedirs(dirname(PATH_OUT_DBG))
if not exists(dirname(PATH_OUT_BLUR_parms)):
makedirs(dirname(PATH_OUT_BLUR_parms))
if not exists(dirname(PATH_OUT_Cam_RT_4x4)):
makedirs(dirname(PATH_OUT_Cam_RT_4x4))
if not exists(dirname(PATH_OUT_Subj_Pelvis_T)):
makedirs(dirname(PATH_OUT_Subj_Pelvis_T))
if not exists(dirname(PATH_OUT_Subj_ZRot)):
makedirs(dirname(PATH_OUT_Subj_ZRot))
if not exists(dirname(PATH_OUT_bg_img_PATH)):
makedirs(dirname(PATH_OUT_bg_img_PATH))
if not exists(dirname(PATH_OUT_txt_img_PATH)):
makedirs(dirname(PATH_OUT_txt_img_PATH))
if not exists(dirname(PATH_OUT_scale)):
makedirs(dirname(PATH_OUT_scale))
if not exists(dirname(PATH_OUT_bbx_head)):
makedirs(dirname(PATH_OUT_bbx_head))
if not (exists(dirname(PATH_OUT_ANNOLIST))):
makedirs(dirname(PATH_OUT_ANNOLIST))
if not (exists(dirname(PATH_OUT_ANNOLIST_HEAD))):
makedirs(dirname(PATH_OUT_ANNOLIST_HEAD))
if not (exists(dirname(PATH_OUT_ANNOLIST_img))):
makedirs(dirname(PATH_OUT_ANNOLIST_img))
if not (exists(dirname(PATH_OUT_ANNOLIST_objpos))):
makedirs(dirname(PATH_OUT_ANNOLIST_objpos))
if not (exists(dirname(PATH_OUT_ANNOLIST_scale))):
makedirs(dirname(PATH_OUT_ANNOLIST_scale))
for i in range(nrOfPeople):
# not tested for multi person
if RANDOM_TXT_per_FRAME:
cloth_img[i], cloth_img_PATH = set_txt(RANDOM_TXT, cloth_img[i], txt_paths[i], i)
if RANDOM_BG_per_FRAME:
bg_img, bg_img_PATH = set_background(RANDOM_BG, bg_img, idx)
#####################
texim.image = bg_img
#####################
color = None
for i in range(nrOfPeople):
for mname, material in materials[i].items():
mtree = material.node_tree
if color is not None:
print(' - color is not None - EXITING')
exit(1)
mtree.links.new(mtree.nodes['RGB'].outputs[0], mtree.nodes['Script'].inputs[0])
mtree.nodes['RGB'].outputs[0].default_value = color
else:
mtree.links.new(mtree.nodes['Image Texture'].outputs[0],
mtree.nodes['Script'].inputs[0])
#####################################################
if RANDOM_LIGHTS_per_FRAME:
# TODO: save all sh coeffs
sh_coeffs = set_lights(RANDOM_LIGHTS)
#####################################################
for scss in harmonics_scripts:
for ish, coeff in enumerate(sh_coeffs):
for sc in scs:
sc.inputs[ish + 1].default_value = coeff
# rendered the textured body
scene.render.use_antialiasing = True
for i in range(nrOfPeople):
for part, mat in materials[i].items():
if True:
mat.node_tree.nodes['Image Texture'].image = cloth_img[i]
else:
print(0)
#############################################
#############################################
scene.render.filepath = PATH_OUT_full_textBG
#############################################
#############################################
bpy.ops.render.render(write_still=True)
########################################
########################################
##############################################################
#############################################################
# bone locations should be saved AFTER rendering so that the bones are updated
pose_joints_2d = {}
pose_joints_3d = {}
for ob, arm_ob, obname in zip(obs, arm_obs, obnames):
bpy.ops.object.select_all(action='DESELECT')
arm_ob.select = True
bpy.context.scene.objects.active = arm_ob
pose_joints_2d[obname], pose_joints_3d[obname], bone_loc_names = get_bone_locs(arm_ob,
ob,
obname,
scene,
cam_ob,
n_bones,
UPPER_HEAD)
| np.save(PATH_OUT_pose_joints_3d, pose_joints_3d) | numpy.save |
import numpy as np
### Work in progress: not fully functional yet.
#################################################
# Read the sequence from stdin and split it on
# the ", " delimiter into a list of tokens.
#################################################
seq = input("Enter in sequence: ")
seq = list(seq.split(", "))
count = 0  # number of consecutive singular sub-matrices seen so far
length = 0  # detected length of the linear recurrence
sol = 0  # placeholder for the recurrence solution (unused so far)
tol = 3  # how many consecutive singular matrices are tolerated before stopping
print(str(len(seq)))
# Earlier attempt kept for reference: grew an i-by-i matrix one size at a
# time and tested its determinant mod 2 for singularity over GF(2).
#for i in range(1, int(len(seq)/2)):
#    matrix = np.zeros((i, i), dtype=int)
#    for x in range(0, i):
#        for y in range(0, i):
#            matrix[x][y] = int(seq[x+y]) - int('0')
#
#    print(str(i) + " " + str(count), end = " ")
#    print(str(int(np.linalg.det(matrix)%2)) + "\n" + "-------------------------------------------------")
#
#    if count == 5 and int(np.linalg.det(matrix)%2) == 0:
#        length = i - 6
#        break
#    elif int(np.linalg.det(matrix)%2) == 0:
#        count+=1
#    elif int(np.linalg.det(matrix)%2) == 1:
#        count = 0
#
#######################################################
# Arrange the data into a (len(seq)/2) x (len(seq)/2)
# Hankel matrix: entry [x][y] = seq[x+y], i.e. each
# anti-diagonal is constant.
# NOTE(review): seq holds strings; assigning them into
# an int array relies on NumPy's implicit str->int
# conversion — verify the inputs are numeric tokens.
#######################################################
seqMatrix = np.zeros((int(len(seq)/2),int(len(seq)/2)), dtype=int)
for x in range(0, int(len(seq)/2)):
    for y in range(0, int(len(seq)/2)):
        seqMatrix[x][y] = seq[x+y]
#######################################################
# Next: compute determinants of leading sub-matrices
# and stop after `tol` consecutive zero determinants;
# then check whether the last non-singular matrix
# yields a valid solution, otherwise continue.
#######################################################
for i in range(12, int(len(seq)/2)):
subMatrix = np.zeros((i,i), dtype=int)
for x in range(0,i):
for y in range(0,i):
subMatrix[x][y] = seqMatrix[x][y]
det = int(np.linalg.det(subMatrix)%2)
print(i, end="")
print(" ", end="")
print(det, end="")
print(" ", end="")
print(count, end="")
print("\n<<<<<<<<<<<<<<<<")
if det == 1:
count = 0
elif count == tol and det == 0:
count = 0
length = i-tol-1
rhsMatrix = | np.zeros((1,i-tol-1), dtype=int) | numpy.zeros |
class input_data:
    """Namespace of constants for a fuel-pin simulation.

    All attributes are evaluated once at class-definition time and read as
    class attributes elsewhere. Groups: fuel-pellet temperature-profile
    inputs, thermal-conductivity parameters, crack-surface-area inputs, and
    grain-bubble / fission-gas arrays sized by ``n``.
    """
    ## Value initialization
    # NOTE(review): imports inside a class body become class attributes
    # (input_data.np etc.); matplotlib/math are not used below — presumably
    # consumed by code outside this class. Verify before removing.
    import numpy as np
    from matplotlib import pyplot as plt
    import math
    # --- Input data for the temperature profile calculation ---
    rf = (4.18/2)*1E-3  # fuel radius [m] (4.18 mm diameter)
    dr = 1E-4  # radial mesh spacing [m]
    alphat = 1E-6  # thermal diffusivity term; only contains 1/(rho*Cp)
    dt_max = (dr ** 2) / (2 * alphat)  # explicit-scheme stability limit on dt
    dt = dt_max / 10  # time step size, 10x below the stability limit
    sp = int(rf/dr)  # number of spatial mesh points
    time_step = int(1E4)  # number of time points
    kc = 29.0  # clad conductance
    kg = 58.22E-3  # conductance of the gap between clad and fuel
    G = 427E-6  # gap width [m]
    hg = kg / G  # gap conductance
    linear_heat_rate = 400*1E2  # linear heat rate [W/m]
    Qe = linear_heat_rate/(3.14*rf**2)  # volumetric heat source (LHR / cross-section)
    # --- Parameters for the calculation of conductivity k ---
    k = [2 for i in range(sp + 1)]  # initial conductivity array (one entry per mesh point)
    x = 2.0 - 1.97  # stoichiometry deviation; 1.97 is the O/M ratio
    A = 2.85 * x + 0.035  # conductivity correlation coefficient
    B = -0.715 * x + 0.286  # conductivity correlation coefficient
    space = [i for i in range(1, sp)]  # interior mesh-point indices
    # --- Input data for the crack surface area calculation ---
    a = 5E-6  # grain radius [m]
    R = rf-0.09E-3  # pellet radius [m] (fuel radius minus rim)
    H = 14.0E-3  # pellet height [m]
    thickness_of_annuli = 0.5E-3  # radial thickness of one annulus [m]
    no_of_annuli = int(R / thickness_of_annuli)  # the pellet is divided into this many annuli
    q = 35  # E3 # linear heat rate of the pin (kW/m)
    Nc = q / 2.0  # number of radial cracks
    S = np.zeros(no_of_annuli + 1)  # surface-area array, initialized to zero
    r = np.ones(no_of_annuli + 1)  # annulus radii
    r[0] = R  # outermost annulus starts at the pellet radius
    V = float((3.14 * (R ** 2) * H) / no_of_annuli)  # volume of one annulus
    temp_T = np.ones(no_of_annuli)  # per-annulus temperature scratch array
    n = int(5E3)  # number of time points for the grain-bubble arrays below
    s_v_gb_cummulative = 0  # cumulative grain-boundary surface-to-volume ratio
    s_v_gb = np.zeros(n)  # grain-boundary surface-to-volume ratio history
    # NOTE(review): fc, Dfc, Q, DQ, Rf_dot are re-initialized a few lines
    # below; these first assignments are dead (Rf_dot even changes from
    # ones to zeros). Kept as-is — confirm which initialization is intended.
    fc = np.ones(n)
    Dfc = np.ones(n)
    Q = np.ones(n)
    DQ = np.ones(n)
    Rf_dot = np.ones(n)
    # time required for the establishment of the grain bubble, t_est
    # n = int(1E2)
    fc = np.ones(n)
    Dfc = np.ones(n)
    DQ = np.ones(n)
    Rf_dot = np.zeros(n)  # overwrites the earlier np.ones(n) initialization
    to = 1 * np.zeros(n)
    tc = 1E2*np.ones(n)
    rt = 1E-10*np.ones(n)
    # rt[1] = 1E-/7
    rt[1] = 1E-09  # seed value for the second time point
    rc = np.ones(n)
    rc[1] = 1E-7  # seed value for the second time point
    Re_dot = np.zeros(n)
    Re_dot[1] = 1E1
    Rc_dot = np.zeros(n)
    Rc_dot[1] = 1E1
    # rt[1] = 1E-7
    # rc[1] = 1E-7
    a1 = 0.1
    a2 = 2.2
    omega = 4.1E-29  # atomic volume [m^3]
    Del_S_by_S = np.zeros(n)  # relative surface-area change history
import numpy as np
import matplotlib.pyplot as plt
import importlib
import matplotlib as mp
from numpy import exp
from mpl_toolkits.mplot3d import axes3d
from numpy import sqrt, arange, pi, meshgrid, cos, sin, mod, size, ones, zeros, linspace, floor, exp
# --- Figure geometry: derive size in inches from a LaTeX column width ---
fig_width_pt = 4*246.0  # column width in pt; get it from LaTeX with \showthe\columnwidth
inches_per_pt = 1.0/72.27               # convert pt to inch (TeX points)
golden_mean = (np.sqrt(5)-1.0)/2.0         # aesthetic height/width ratio
fig_width = fig_width_pt*inches_per_pt  # width in inches
fig_height = fig_width*golden_mean      # height in inches
fig_size = [fig_width,fig_height]
# Older rcParams-dict configuration kept for reference; superseded by the
# individual mp.rc(...) calls below.
# params = {'backend': 'ps',
#           'axes.labelsize': 40,
#           'font.size': 40,
#           'legend.fontsize': 40,
#           'xtick.labelsize': 40,
#           'ytick.labelsize': 40,
#           'lines.linewidth': 6,
#           'text.usetex': True,
#           'figure.figsize': fig_size}
# --- Global matplotlib style: thick lines, PDF output, large TeX-rendered text ---
mp.rc('lines', lw=6)
mp.rc('savefig', format='pdf')
mp.rc('font', size = 40)
mp.rc('text', usetex = True)
def create_subax(fig, ax, rect, xlimit=[], ylimit=[], xticks=[], yticks=[], side='b', xticklab=[], yticklab=[]):
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
transFigure = fig.transFigure.inverted()
infig_position = transFigure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[3] # <= Typo was here
sub_ax = fig.add_axes([x,y,width,height])
if xlimit:
sub_ax.set_xlim(xlimit)
if ylimit:
sub_ax.set_ylim(ylimit)
for tick in sub_ax.xaxis.get_major_ticks():
tick.label.set_fontsize(30)
for tick in sub_ax.yaxis.get_major_ticks():
tick.label.set_fontsize(30)
sub_ax.set_xticks(xticks)
sub_ax.set_yticks(yticks)
if xticklab:
sub_ax.set_xticklabels(xticklab)
if yticklab:
sub_ax.set_yticklabels(yticklab)
if xlimit and ylimit:
rect1 = mp.patches.Rectangle((xlimit[0],ylimit[0]), xlimit[1]-xlimit[0], ylimit[1]-ylimit[0],
color='k', fill=False, lw=2, zorder=5)
ax.add_patch(rect1)
transData = ax.transData.inverted()
if side == 'b':
subax_pos1 = transData.transform(ax.transAxes.transform(np.array(rect[0:2])+np.array([0,rect[3]])))
subax_pos2 = transData.transform(ax.transAxes.transform(np.array(rect[0:2])+np.array([rect[2],rect[3]])))
ax.plot([xlimit[0],subax_pos1[0]],[ylimit[0],subax_pos1[1]], color='k', lw=2)
ax.plot([xlimit[1],subax_pos2[0]],[ylimit[0],subax_pos2[1]], color='k', lw=2)
elif side == 'r':
subax_pos1 = transData.transform(ax.transAxes.transform( | np.array(rect[0:2]) | numpy.array |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from functools import partial
import subprocess
from distutils.util import strtobool
import numpy as np
from jams.const import huge
from jams.npyio import savez_compressed
from jams.closest import closest
# ToDo:
# Handling constraints
# write tmp/population files (as in SCE of Fortran)
# write out also in logfile if not None (use jams.tee as in joptimise)
# crossover with quadratic function (QIPSO) ?
def _ext_obj_wrapper(func, lb, ub, mask,
                     parameterfile, parameterwriter, objectivefile, objectivereader, shell, debug, rank,
                     params):
    '''
    Evaluate the objective for one parameter set, dispatching between a plain
    Python callable and an external executable.

    Meant to be 'partialised' so that the resulting function takes only params:
        obj = partial(_ext_obj_wrapper, func, lb, ub, mask,
                      parameterfile, parameterwriter, objectivefile,
                      objectivereader, shell, debug, rank)
        fx  = obj(params)

    Input
    -----
    func             python callable, or string/list for an external executable
    lb               (npars) lower bounds of parameters
    ub               (npars) upper bounds of parameters
    mask             (npars) include (1) or exclude (0) parameter from optimisation
    parameterfile    parameter file for the executable
    parameterwriter  python function writing the parameter file
    objectivefile    file with the objective value written by the executable
    objectivereader  python function reading the objective value file
    shell            if True, run the command through the shell
    debug            if True, model output of the executable is displayed
    rank             process rank, appended as last command line argument
                     (ignored if None)
    params           (npars) parameter set

    Output
    ------
    Objective function value.

    History
    -------
    Written, MC, Nov 2016
    '''
    # Plain Python function: evaluate directly, no file round trip needed.
    if not isinstance(func, (str, list)):
        return func(params)
    # External program: write parameters, run it, then read the objective back.
    parameterwriter(parameterfile, params, lb, ub, mask)
    if rank is None:
        cmd = func
    elif isinstance(func, str):
        cmd = [func, str(rank)]
    else:
        cmd = func + [str(rank)]
    if debug:
        subprocess.call(cmd, shell=shell)          # model output goes to stdout
    else:
        subprocess.check_output(cmd, shell=shell)  # model output is captured
    return objectivereader(objectivefile)
# Function wrappers for objective and constraints
# used with functools.partial
def _obj_wrapper(func, arg, kwarg, x):
    '''
    Evaluate func at x with extra positional and keyword arguments.

    Intended for use with functools.partial:
        obj = partial(_obj_wrapper, func, arg, kwarg)
    so that obj(x) is equivalent to func(x, *arg, **kwarg).
    '''
    # Forward the extra arguments captured by partial.
    return func(x, *arg, **kwarg)
def _is_feasible_wrapper(func, x):
    '''
    Check that all constraints are met at x.

    func is one of the 'partialised' constraint wrappers
    (_cons_none_wrapper, _cons_ieqcons_wrapper, _cons_f_ieqcons_wrapper),
    each returning an array that must be >= 0 where constraints hold.
    This function is itself partialised:
        is_feasible = partial(_is_feasible_wrapper, cons)
    '''
    constraints = func(x)
    return np.all(constraints >= 0.)
def _cons_none_wrapper(x):
    '''
    Dummy constraint function used when no constraints are given.

    Always returns a single 0, which counts as "constraint satisfied"
    in _is_feasible_wrapper (>= 0).
    '''
    return np.array([0])
def _cons_ieqcons_wrapper(ieqcons, arg, kwarg, x):
    '''
    Constraint wrapper for the case that ieqcons is given, i.e. a list of
    constraint functions, each returning >= 0 where constraints are met.

    Intended for use with functools.partial; evaluates every constraint
    function at x and stacks the results into one array.
    '''
    values = []
    for constraint in ieqcons:
        values.append(constraint(x, *arg, **kwarg))
    return np.array(values)
def _cons_f_ieqcons_wrapper(f_ieqcons, arg, kwarg, x):
    '''
    Constraint wrapper for the case that f_ieqcons is given, i.e. a single
    function returning a 1-D array with values >= 0 where constraints are met.

    Intended for use with functools.partial.
    '''
    constraints = f_ieqcons(x, *arg, **kwarg)
    return np.array(constraints)
def range012range(x, xmin, xmax):
    '''
    Linearly rescale x from the unit interval [0,1] to [xmin,xmax].

    Works elementwise for scalars and numpy arrays.
    '''
    span = xmax - xmin
    return xmin + x * span
def range2range01(x, xmin, xmax):
    '''
    Linearly rescale x from [xmin,xmax] to the unit interval [0,1].

    Works elementwise for scalars and numpy arrays.
    '''
    # NOTE(review): assumes xmax > xmin; xmax == xmin would divide by zero.
    span = xmax - xmin
    return (x - xmin) / span
def diversity(x, lb, ub, mask):
    '''
    Diversity of the swarm positions: mean per-particle RMSE around the swarm
    centroid, computed in parameter space normalised to [0,1].

    Adapted from [Pant et al., 2009]
    http://www.iaeng.org/IJCS/issues_v36/issue_2/IJCS_36_2_02.pdf
    but changed to RMSE. RMSE is generally about 1/2 or 1/3 of the normalized
    geometric range used in SCE.

    Input
    -----
    x     (S,D) current particle positions
    lb    (D)   lower bounds (assumed < ub for masked dimensions)
    ub    (D)   upper bounds
    mask  (D)   dimensions included in the optimisation
    '''
    # Rescale masked dimensions to [0,1]; excluded dimensions stay as given.
    x01 = np.where(mask, (x - lb) / (ub - lb), x)
    nparticles = float(x01.shape[0])
    ndim = float(x01.shape[1])
    # Swarm centroid in normalised space.
    centroid = np.sum(x01, axis=0) / nparticles
    # Per-particle RMSE around the centroid, averaged over the swarm.
    rmse = np.sqrt(np.sum((x01 - centroid)**2, axis=1) / ndim)
    return np.sum(rmse, axis=0) / nparticles
def get_neighbor_indeces(n, S, topology, kl=1):
    '''
    Get the indices of the neighbors of the current particle given the topology.

    Input
    -----
    n        integer
             Particle index.
    S        integer
             Swarm size.
    topology string
             Neighborhood topologies. These are rather social than geographical topologies.
             All neighborhoods comprise the current particle as well.
             [Kennedy & Mendes, 2002] doi: 10.1109/CEC.2002.1004493
             'gbest'   Neighborhood is entire swarm.
             'lbest'   Particles arranged in a ring, in which each particle communicates with
                       kl particles on each side, i.e. particle i has the neighborhood
                       i-kl, i-kl+1, ..., i, i+1, ..., i+kl-1, i+kl
                       [Mohais et al., 2005] doi: 10.1007/11589990_80
             'neumann' Neighborhood of a point including all points at a Hamming distance of 1.
                       Particles are arranged in a lattice, where each particle interacts with
                       its immediate 4 neighbors to the N, S, E, and W.
                       [Kennedy and Mendes, 2006] doi: 10.1109/TSMCC.2006.875410
                       The von Neumann neighborhood is configured into r rows and c columns,
                       where r is the highest integer less than or equal to sqrt(S) that evenly
                       divides S and c = S / r
                       [Mohais et al., 2005] doi: 10.1007/11589990_80
             'ring'    'lbest' with kl=1

    Optional Input
    --------------
    kl       integer
             Neighborhood distance in topology 'lbest'.
             (Default: 1 = ring)

    Output
    ------
    Iterable of neighborhood particle indices (includes n itself).

    Raises
    ------
    ValueError for an unknown topology (previously the function fell through
    and crashed with an opaque UnboundLocalError on return).
    '''
    topo = topology.lower()
    if topo == 'ring':
        kl = 1
    if topo == 'gbest':
        ii = range(S)  # whole swarm
    elif (topo == 'lbest') or (topo == 'ring'):
        ii = [n]  # self
        for kk in range(1, kl + 1):
            ii.append((n - kk) % S)  # kk-th neighbor to the left
            ii.append((n + kk) % S)  # kk-th neighbor to the right
    elif topo == 'neumann':
        # r = largest integer <= sqrt(S) that evenly divides S; c = S/r.
        rows = int(np.floor(np.sqrt(S)))
        while (S % rows) != 0:
            rows -= 1
        cols = S // rows
        # Horizontal wrap-around within the row of x (row-major, c columns).
        left = lambda x, c: (x - 1) % c + x // c * c
        right = lambda x, c: (x + 1) % c + x // c * c
        # NOTE(review): the vertical step uses `rows` while the horizontal wrap
        # uses `cols`; for non-square lattices confirm the intended ordering.
        above = (n - rows) % S
        below = (n + rows) % S
        ii = [left(above, cols), above, right(above, cols),
              left(n, cols), n, right(n, cols),
              left(below, cols), below, right(below, cols)]
    else:
        # Bug fix: fail fast with a clear message instead of returning an
        # unbound variable.
        raise ValueError('Unknown topology: ' + topology)
    return ii
def get_best_neighbor(p, fp, topology, kl=1):
    '''
    Return, for every particle, the best known position among its neighbors
    under the given social topology.

    Input
    -----
    p        ND-array
             The best known position of each particle.
    fp       1D-array
             The objective values at each position in p.
    topology string
             Neighborhood topology: 'gbest', 'lbest', 'ring', or 'neumann';
             see get_neighbor_indeces for definitions and references.

    Optional Input
    --------------
    kl       integer
             Neighborhood distance in topology 'lbest'.
             (Default: 1 = ring)

    Output
    ------
    [g, fg]  g (S,D) best neighbor position per particle,
             fg (S) the corresponding objective values.
    '''
    topo = topology.lower()
    if topo == 'ring':
        kl = 1
    S, D = p.shape
    g = np.ones((S, D)) * np.inf
    fg = np.ones(S) * np.inf
    if topo == 'gbest':
        # Whole swarm is the neighborhood: broadcast the overall best.
        best = np.argmin(fp)
        g[:, :] = p[best, :]
        fg[:] = fp[best]
    elif topo in ('lbest', 'ring', 'neumann'):
        for k in range(S):
            neigh = get_neighbor_indeces(k, S, topology, kl=kl)
            fneigh = fp[neigh]
            best = np.argmin(fneigh)
            g[k, :] = p[neigh, :][best, :].copy()
            fg[k] = fneigh[best]
    # NOTE(review): any other topology string falls through and returns the
    # np.inf-filled arrays unchanged - confirm the caller never relies on this.
    return [g, fg]
def rwde(func, feasible, x, fx, xmin, xmax, lb, ub, x0, mask, maxit, nlocal=5):
    '''
    Random Walk with Direction Exploitation (RWDE) local search.

    [Petalas et al. 2007] doi: 10.1007/s10479-007-0224-y
    as implemented in
    [Wang et al. 2012] doi: 10.1016/j.ins.2012.02.016

    Input
    -----
    func      objective function wrapper: function to min-/maximise
    feasible  constraint function wrapper
    x         1D-array: position of one particle in parameter space
    fx        scalar: objective value at position x
    xmin      1D-array: lower bounds of particles search positions
    xmax      1D-array: upper bounds of particles search positions
    lb        1D-array: lower bounds of the particle positions (parameters)
    ub        1D-array: upper bounds of the particle positions (parameters)
    x0        1D-array: taken at dimensions with mask==False
    mask      1D-array: include (True) or exclude (False) parameters
    maxit     if True, maximise func (i.e. minimise -func)

    Optional Input
    --------------
    nlocal    number of local steps (Default: 5)

    Output
    ------
    x, fx, ngood - improved position, its objective value, and the number of
    accepted (improving) steps.
    '''
    ndim = x.shape[0]
    lam_init = 1.   # initial step-length factor
    lam = lam_init
    large = fx      # running fallback value for NaN/Inf objectives
    ngood = 0       # accepted steps
    for _ in range(nlocal):
        # One uniform draw per iteration; the draw order is kept identical for
        # reproducibility of seeded runs.
        # NOTE(review): steps are drawn from U[0,1), so the walk only ever
        # moves towards xmax - confirm against Petalas et al. (2007).
        step = np.random.uniform(size=ndim)
        trial = np.clip(np.where(mask, x + lam * step * (xmax - xmin), x0), lb, ub)
        if not feasible(trial):
            continue
        ftrial = func(trial)
        if maxit:
            ftrial *= -1.
        # Inflate the fallback on every feasible evaluation; use it for NaN/Inf.
        large = 1.1 * large if large > 0. else 0.9 * large
        if not np.isfinite(ftrial):
            ftrial = large
        if ftrial < fx:     # better: accept and reset the step length
            x, fx = trial, ftrial
            lam = lam_init
            ngood += 1
        elif ftrial > fx:   # worse: halve the step length
            lam *= 0.5
    return x, fx, ngood
def cbls(func, feasible, x, fx, p, xmin, xmax, lb, ub, x0, mask, inertia, phip, maxit, strategy, nlocal=5):
    '''
    Cognition-Based Local Search (CBLS).

    [Wang et al. 2012] doi: 10.1016/j.ins.2012.02.016

    Performs nlocal velocity updates using only the cognitive component
    (attraction towards the particle's own best position p) and no social
    component, keeping the best feasible position encountered.

    Input
    -----
    func      objective function wrapper: function to min-/maximise
    feasible  constraint function wrapper
    x         1D-array: position of one particle in parameter space
    fx        scalar: objective value at position x
    p         1D-array: particle's best known position
    xmin      1D-array: lower bounds of particles search positions
    xmax      1D-array: upper bounds of particles search positions
    lb        1D-array: lower bounds of the particle positions (parameters)
    ub        1D-array: upper bounds of the particle positions (parameters)
    x0        1D-array: taken at dimensions with mask==False
    mask      1D-array: include (True) or exclude (False) parameters
    inertia   scalar: particle velocity scaling factor
    phip      scalar: scaling factor towards the particle's best position
    maxit     if True, maximise func (i.e. minimise -func)
    strategy  PSO variant: 'original', 'inertia', or 'canonical'
              'original'/'inertia' [Kennedy & Eberhart; Shi & Eberhart, 1998]:
                  v_new = inertia*v + phip*rp*(p - x)
              'canonical' [Clerc & Kennedy, 2002] doi: 10.1109/4235.985692,
              constriction factor:
                  v_new = inertia*(v + phip*rp*(p - x))

    Optional Input
    --------------
    nlocal    number of local steps (Default: 5)

    Output
    ------
    xnew, fxnew, ngood - best found position, its objective value, and the
    number of accepted (improving) steps.
    '''
    ptypes = ['original', 'inertia', 'canonical']
    assert strategy.lower() in ptypes, 'Cognition-Based Local Search not available for strategy {:}'.format(strategy)
    canonical = strategy.lower() == 'canonical'
    ndim = x.shape[0]
    large = fx      # running fallback value for NaN/Inf objectives
    best_x = x[:]   # never mutated in place, only rebound
    best_f = fx
    # Random initial velocity within the search range; one draw here plus one
    # per iteration - the draw order is kept identical for seeded runs.
    v = xmin + np.random.uniform(size=ndim) * (xmax - xmin)
    ngood = 0
    for _ in range(nlocal):
        rp = np.random.uniform(size=ndim)
        cognitive = phip * rp * (p - best_x)
        if canonical:
            vnew = inertia * (v + cognitive)
        else:  # 'original' or 'inertia'
            vnew = inertia * v + cognitive
        trial = np.clip(np.where(mask, best_x + vnew, x0), lb, ub)
        if not feasible(trial):
            continue
        ftrial = func(trial)
        if maxit:
            ftrial *= -1.
        large = 1.1 * large if large > 0. else 0.9 * large
        if not np.isfinite(ftrial):
            ftrial = large
        # NOTE(review): compared against the incoming fx (not the running
        # best_f), and v is never updated with vnew - both reproduced exactly
        # as in the original implementation.
        if ftrial < fx:
            best_x = trial
            best_f = ftrial
            ngood += 1
    return best_x, best_f, ngood
# Particle Swarm Optimisation
def pso(func, x0, lb, ub,
mask=None,
ieqcons=None, f_ieqcons=None,
arg=(), kwarg={},
swarmsize=None, inertia=None, phip=None, phig=None, maxn=250,
minstep=1e-8, minobj=1e-8, maxit=False,
init='lhs', strategy='canonical', topology='gbest', kl=1,
memetic='no', nmemetic=1, nlocal=5, rrwde=0.01, pls=0.2,
includex0=False, seed=None,
processes=1,
verbose=0, pout=False, cout=False,
restart=False, restartfile1='pso.restart.npz', restartfile2='pso.restart.txt',
parameterfile=None, parameterwriter=None,
objectivefile=None, objectivereader=None,
shell=False, debug=False):
"""
Particle Swarm Optimization (PSO)
Definition
----------
def pso(func, x0, lb, ub,
mask=None,
ieqcons=None, f_ieqcons=None,
arg=(), kwarg={},
swarmsize=None, inertia=None, phip=None, phig=None, maxn=250,
minstep=1e-8, minobj=1e-8, maxit=False,
init='lhs', strategy='canonical', topology='gbest', kl=1,
includex0=False, seed=None,
processes=1,
verbose=0, pout=False, cout=False,
restart=False, restartfile1='pso.restart.npz', restartfile2='pso.restart.txt',
parameterfile=None, parameterwriter=None,
objectivefile=None, objectivereader=None,
shell=False, debug=False):
Input
-----
func python function or string for external executable
The function to be minimized.
x0 1D-array
Will be taken at dimensions with mask==False.
lb 1D-array
The lower bounds of the particle positions (parameters).
ub 1D-array
The upper bounds of the particle positions (parameters).
Optional Input
--------------
mask 1D-array
include (1,True) or exclude (0,False) parameters in optimisation.
(Default: include all dimensions)
ieqcons list
A list of functions of length n such that ieqcons[j](x,*arg) >= 0.0 in
a successfully optimized problem.
(Default: None)
f_ieqcons function
Returns a 1-D array in which each element must be greater or equal
to 0.0 in a successfully optimized problem. If f_ieqcons is specified,
ieqcons is ignored.
(Default: None)
arg tuple
Additional arguments passed to objective and constraint functions (only for Python functions).
(Default: empty tuple)
kwarg dict
Additional keyword arguments passed to objective and constraint functions (only for Python functions).
(Default: empty dict)
swarmsize int
The number of particles in the swarm.
(Default: max(min(3*len(lb),40),10))
inertia scalar
Particle velocity scaling factor.
Default depends on algorithm (strategy).
phip scalar
Scaling factor to search away from the particle's best known position.
Default depends on algorithm (strategy).
phig scalar
Scaling factor to search away from the swarm's (neighbor's) best known position.
Default depends on algorithm (strategy).
maxn int
The maximum number of iterations for the swarm to search.
(Default: 250)
minstep scalar
The minimum stepsize of swarm's best position before the search terminates.
(Default: 1e-8)
minobj scalar
Objective function defining convergence. Attention at maxit=True.
(Default: 1e-8)
maxit boolean
Minimise or maximise func.
(Default: False)
False: minimise objective function down to minobj
True: maximise objective function, i.e. minimise -objective function down to minobj
init string
How to sample the initial swarm positions and velocities.
(Default: 'lhs')
'random': random sampling from uniform distribution
'lhs': latin hypercube sampling from uniform distributions
'sobol': quasirandom Sobol sequence (only up to 40 dimensions)
strategy string
PSO variants.
(Default: 'canonical')
'original': Textbook particle swarm algorithm with inertia weight
x = current position
p = particles best position
g = neighborhood best position
rg, rp = np.random.uniform(size=(S,D))
inertia=0.5, phip=2., phig=2.
v = inertia*v + rp*phip*(p-x) + rg*phig*(g-x)
x = x + v
'inertia': Same as 'original' but with inertia weight decreasing from 0.9 to 0.4
over time, i.e. over iterations (it)
imax = 0.9
imin = 0.4
inertia = imax - float(it)/float(maxn-1) * (imax-imin)
'canonical': <NAME> (2000) with fixed, clamped constriction factor
From PaGMO (esa.github.io/pagmo):
Clerc's analysis of the iterative system led him to propose a strategy for the
placement of "constriction coefficients" on the terms of the formulas; these
coefficients controlled the convergence of the particle and allowed an elegant and
well-explained method for preventing explosion, ensuring convergence, and
eliminating the arbitrary vmax parameter. The analysis also takes the guesswork
out of setting the values of phi_1 and phi_2.
"This is the canonical particle swarm algorithm of today."
[Poli et al., 2007] doi: 10.1007/s11721-007-0002-0
[Clerc & Kennedy, 2002] doi: 10.1109/4235.985692
From <NAME>, Diss 2001:
The problem [with the constriction factor], according to Eberhart and Shi,
is that the particles stray too far from the desired region of search space.
To mitigate this effect they decided to apply clamping to the constriction factor
implementation as well, setting the vmax parameter equal to xmax, the size of the
search space. This led to improved performance for almost all the functions they
used during testing - both in terms of the rate of convergence and the ability of
the algorithm to reach the error threshold.
[<NAME> & <NAME>, 2000] Comparing inertia weights and constriction factors
in particle swarm optimization. In: Proceedings of the Congress on Evolutionary
Computation (CEC2000), 84-88.
[<NAME>, 2000] An Analysis of Particle Swarm Optimizers, PhD thesis,
University of Pretoria, South Africa
inertia=0.7289, phip=2.05, phig=2.05
v = inertia * (v + phip*rp*(p - x) + phig*rg*(g - x))
v = clip(v, lb, ub)
x = x + v
'fips': Fully Informed Particle Swarm
From PaGMO (esa.github.io/pagmo):
Whereas in the traditional algorithm each particle is affected by its own
previous performance and the single best success found in its neighborhood, in
Mendes' fully informed particle swarm (FIPS), the particle is affected by all its
neighbors, sometimes with no influence from its own previous success.
"With good parameters, FIPS appears to find better solutions in fewer iterations
than the canonical algorithm, but it is much more dependent on the population
topology."
[Poli et al., 2007] doi: 10.1007/s11721-007-0002-0
[Mendes et al., 2004] doi: 10.1109/TEVC.2004.826074
ri = np.random.uniform(size=nneighbor)
inertia = 0.7289
acc_coeff = phip+phig = 4.1
v = inertia * (v + np.sum(ri*acc_coeff/nneighbor*(p[:]-x)))
'nips': Neighborhood Informed Particle Swarm
'fips' but particles are not informed by all other particles
but only the particles in its neighborhood given by topology.
ri = np.random.uniform(size=nneighbor)
inertia = 0.7289, acc_coeff = phip+phig = 4.1
v = inertia * (v + np.sum(ri*acc_coeff/nneighbor*(p[neighbor[:]]-x)))
memetic string
Perfom local search at some particle positions
(Default: 'no')
'no': No local search.
'global': Local search is applied on the overall best position of the swarm.
'local': Local search is applied on each particle with a certain probability.
nmemetic integer
Do local search after each nmemetic swarm iterations.
(Default: 1)
nlocal integer
Number of local searches.
(Default: 5)
rrwde float
Norm below which local search uses Random Walk with Direction Exploitation (RWDE),
otherwise Cognition-Based Local Search (CBLS)
(Default: 0.01)
pls float or None
Probability for each particle to perform local search after nmemetic iterations
(Default: 0.2)
If None or < 0 then pls will be calculated from the success rate of the last local searches.
[Wang et al. 2012] doi: 10.1016/j.ins.2012.02.016
Be ngood the actual number of valid local moving steps (at the memetic step before, t-1)
and ntotal the total local moving steps carried out during iteration t-1, respectively.
rgood = ngood/ntotal
delta = 0.1, beta = 0.5, plsmin = 0.1, plsmax = 1.0
pls = min(max(beta**np.sign(rgood-delta)*pls, plsmin), plsmax)
topology string
Neighborhood topologies. These are rather social than geographical topologies.
All neighborhoods comprise the current particle as well.
[Kennedy & Mendes, 2002] doi: 10.1109/CEC.2002.1004493
(Default: 'gbest')
'gbest' Neighborhood is entire swarm.
                    'lbest'    Particles arranged in a ring, in which each particle communicates with
kl particles on each side, i.e. particle i has the neighborhood
i-kl, i-kl+1, ..., i, i+1, ..., i+kl-1, i+kl
[Mohais et al., 2005] doi: 10.1007/11589990_80
'mbest' Neighborhood is entire part of the swarm on the current MPI process.
'mbest' == 'gbest' if run is on one processor.
'mbest' gets a more and more local search when mpi processes increase.
This is more a gimmick than a real topology.
'neumann' Neighborhood of a point including all points at a Hamming distance of 1.
Particles are arranges in a lattice, where each particle interacts with
its immediate 4 neighbors to the N, S, E, and W.
[Kennedy and Mendes, 2006] doi: 10.1109/TSMCC.2006.875410
The von Neumann neighborhood is configured into r rows and c columns,
where r is the highest integer less than or equal to sqrt(n) that evenly
divides n and c = n / r
[Mohais et al., 2005] doi: 10.1007/11589990_80
'ring' 'lbest' with kl=1
kl integer
Neighborhood distance in topology 'lbest'.
(Default: 1 = ring)
verbose integer
Controlling amount of print-out.
(Default: 0)
0: No print-out.
1: Printing convergence criteria, etc.
2: Printing after each step.
includex0 boolean
True: include x0 in initial swarm
(Default: False)
seed int or array_like
Seed for numpy's random number generator.
(Default: None)
processes int
The number of processes to use to evaluate objective function and constraints.
(Default: 1)
pout boolean
True: include best per-particle positions and their objective values in output.
(Default: False)
cout boolean
True: include number of function calls in output.
(Default: False)
restart boolean
if True, continue from saved state in restartfile1/2.
(Default: False)
restartfile1/2 string
File names for saving current state of PSO.
(Default: pso.restart.npz and pso.restart.txt)
State will be always written, except if restartfile1=None.
parameterfile string
Parameter file for executable; must be given if func is name of executable
(Default: None)
parameterwriter function
Python function for writing parameter file if func is name of executable
(Default: None)
objectivefile string
File with objective value from executable; must be given if func is name of executable
(Default: None)
objectivereader function
Python function for reading objective value if func is name of executable
(Default: None)
shell boolean
If True, the specified executable will be executed through the shell.
(Default: False)
debug boolean
If True, model output is displayed for executable.
(Default: False)
Output
------
g 1D-array
The swarm's best known position.
fg scalar
The objective value at g.
p ND-array
The best known position of each particle.
fp 1D-array
The objective values at each position in p.
License
-------
The original code of <NAME> (pyswarm) was published under the BSD license.
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2013-2016 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, <NAME>, 2013 - https://github.com/tisimst/pyswarm
Modified, MC, Nov 2016 - adapted to JAMS package
MC, Nov 2016 - Changed defaults from swarmsize=100, omega=0.5, phip=0.5, phig=0.5, maxn=100
- Include vmax for original: clip(v, vmin, vmax)
- Sobol sequences and latin hypercube sampling for initial swarm positions
- Different PSO algorithms: original, decreasing inertia weights, constricted
fully informed
- Stop if function below minobj
- debug -> verbose, maxit, cout
- neighborhoods
- external function - mask, x0, parameterfile, parameterwriter,
objectivefile, objectivereader, shell, debug
MC, Dec 2016 - includex0, restart, mpi, memetic
"""
# Get MPI communicator
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
csize = comm.Get_size()
crank = comm.Get_rank()
except ImportError:
comm = None
csize = 1
crank = 0
if csize > 1:
passrank = crank
else:
passrank = None
dodiv = False
    # Different variable types (array, float, int, ...) for restart
if restartfile1 is not None:
# Only arrays with savez_compressed - restartfile1
restartarray = ['x0', 'lb', 'ub', 'mask1', 'mask2', 'x02',
'v', 'x', 'fx', 'fs', 'p', 'fp', 'gp', 'fgp',
'rs2']
if dodiv: restartarray.extend(['gx'])
# Save scalars in simple text file - restartfile2
restartint = ['D', 'S', 'it', 'iS', 'crank',
'rs3', 'rs4', 'ilocal', 'nlgood', 'nltotal']
restartfloat = ['rs5', 'ipls']
restartbool = ['maxit']
restartstring = ['rs1']
if csize > 1:
restartfile1 = restartfile1[0:restartfile1.rfind(".")] + '.' + str(crank) + restartfile1[restartfile1.rfind("."):]
restartfile2 = restartfile2[0:restartfile2.rfind(".")] + '.' + str(crank) + restartfile2[restartfile2.rfind("."):]
saveargarray = '"'+restartfile1+'"'
for j in restartarray: saveargarray = saveargarray + ', '+j+'='+j
saveargint = ','.join(restartint)
saveargfloat = ','.join(restartfloat)
saveargbool = ','.join(restartbool)
saveargstring = ','.join(restartstring)
# Checks
# Function
assert hasattr(func, '__call__') or isinstance(func, (str,list)), 'Invalid function handle or external call.'
# Bounds
assert len(lb)==len(ub), 'Lower- and upper-bounds must have the same lengths.'
lb = np.array(lb)
ub = np.array(ub)
assert np.all(ub >= lb), 'All upper-bounds must be greater or equal than lower-bounds.'
# Mask
if mask is not None:
assert len(mask)==len(ub), 'Mask and bounds must have the same lengths.'
if not np.all(mask):
assert len(mask)==len(x0), 'Mask and x0 must have the same lengths.'
# Initialisation keyword
inits = ['random', 'lhs', 'sobol']
assert init.lower() in inits, 'Initialisation {:} not in {:}'.format(init, inits)
if init.lower() == 'sobol':
assert len(lb) <= 40, "Sobol' sequences only work up to 40 dimensions."
# Strategy keyword
ptypes = ['original', 'inertia', 'canonical', 'fips', 'nips']
assert strategy.lower() in ptypes, 'PSO implementation {:} not in {:}'.format(strategy, ptypes)
# Topology keyword
ttypes = ['gbest', 'lbest', 'mbest', 'neumann', 'ring']
assert topology.lower() in ttypes, 'Topology {:} not in {:}'.format(topology, ttypes)
    # Memetic keyword
mtypes = ['no', 'global', 'local']
assert memetic.lower() in mtypes, 'Memetic implementation {:} not in {:}'.format(memetic, mtypes)
# Parameterfile etc. keywords if func is name of executable
if isinstance(func, (str,list)):
if parameterfile is None:
raise IOError('parameterfile must be given if func is name of executable.')
else:
if csize > 1:
parameterfile1 = parameterfile + '.' + str(crank)
else:
parameterfile1 = parameterfile
if parameterwriter is None:
raise IOError('parameterwrite must be given if func is name of executable.')
if objectivefile is None:
raise IOError('objectivefile must be given if func is name of executable.')
else:
if csize > 1:
objectivefile1 = objectivefile + '.' + str(crank)
else:
objectivefile1 = objectivefile
if objectivereader is None:
raise IOError('objectivereader must be given if func is name of executable.')
# Set defaults per strategy
if strategy.lower() == 'original': # Kennedy & Eberhart, 2001
if inertia is None: inertia=0.5
if phip is None: phip=2.
if phig is None: phig=2.
elif strategy.lower() == 'inertia': # Shi & Eberhart (1998)
imax = 0.9
imin = 0.4
if phip is None: phip=2.
if phig is None: phig=2.
elif strategy.lower() == 'canonical': # Clerc & Kennedy (2000)
if inertia is None: inertia=0.7289
if phip is None: phip=2.05
if phig is None: phig=2.05
elif strategy.lower() == 'fips': # Mendes & Kennedy (2004)
if inertia is None: inertia=0.7289
if phip is None: phip=2.05
if phig is None: phig=2.05
elif strategy.lower() == 'nips':
if inertia is None: inertia=0.7289
if phip is None: phip=2.05
if phig is None: phig=2.05
if not restart:
# Problem sizes
D = len(lb) # dimension of each particle
if swarmsize is None:
S = max(min(3*D,40),10)
S = max(min(S//csize*csize, (40//csize+1)*csize), (10//csize+1)*csize)
else:
S = swarmsize
# Local swarmsize
if S % csize != 0:
raise ValueError("Swarmsize "+str(S)+" must be multiple of number of processes "+str(csize)+".")
iS = S//csize # local swarmsize
# Initialise 1D mask
if mask is not None:
mask1 = mask
else:
mask1 = np.ones(D, dtype=np.bool)
# Partialise objective function
if isinstance(func, (str,list)):
obj = partial(_ext_obj_wrapper, func, lb, ub, mask1,
parameterfile1, parameterwriter, objectivefile1, objectivereader, shell, debug, passrank)
else:
obj = partial(_obj_wrapper, func, arg, kwarg)
# Check for constraint function(s) and partialise them
if f_ieqcons is None:
if ieqcons is None:
if (verbose>=1) and (crank == 0):
print('No constraints given.')
cons = _cons_none_wrapper
else:
if (verbose>=1) and (crank == 0):
print('Converting ieqcons to a single constraint function.')
cons = partial(_cons_ieqcons_wrapper, ieqcons, arg, kwarg)
else:
if (verbose>=1) and (crank == 0):
print('Single constraint function given in f_ieqcons.')
cons = partial(_cons_f_ieqcons_wrapper, f_ieqcons, arg, kwarg)
is_feasible = partial(_is_feasible_wrapper, cons)
# Initialize the multiprocessing module if necessary
if processes > 1:
import multiprocessing
mp_pool = multiprocessing.Pool(processes)
# 2D mask
mask2 = np.tile(mask1,iS).reshape((iS,D))
x02 = np.tile(x0,iS).reshape((iS,D))
# Deal with NaN and Inf
large = huge / S
# Seed random number generator
if crank == 0:
np.random.seed(seed=seed)
# Initialize the particle swarm
# current particle positions
# current particle velocities
if init.lower() == 'random':
# Random numbers only on rank 0 for reproducible results
if crank == 0:
rand = np.random.uniform(size=(2*S,D))
else:
rand = np.empty((2*S,D), dtype=np.float)
# Scatter has different ordering than needed for reproducible results
# Do it manually
if csize > 1:
comm.Bcast(rand, root=0)
x = rand[crank*iS:crank*iS+iS,:]
v = rand[S+crank*iS:S+crank*iS+iS,:]
elif init.lower() == 'sobol':
import sobol
nskip = D*S + crank*D*iS
x = sobol.i4_sobol_generate(D,iS,nskip).transpose()
nskip = 2*D*S + crank*D*iS
v = sobol.i4_sobol_generate(D,iS,nskip).transpose()
elif init.lower() == 'lhs':
x = np.empty((iS,D), dtype=np.float64)
v = np.empty((iS,D), dtype=np.float64)
if crank == 0:
import scipy.stats as stats
from jams.lhs import lhs
dist = [stats.uniform for i in range(D)]
pars = [(0,1) for i in range(D)]
gx = lhs(dist, pars, S).transpose()
gv = lhs(dist, pars, S).transpose()
else:
gx = np.empty((S,D), dtype=np.float64)
gv = np.empty((S,D), dtype=np.float64)
if csize > 1:
comm.Scatter([gx, MPI.DOUBLE], [x, MPI.DOUBLE])
comm.Scatter([gv, MPI.DOUBLE], [v, MPI.DOUBLE])
else:
x = gx
v = gv
fx = np.ones(iS)*large # local current particles function values
if dodiv: gx = np.empty((iS,D), dtype=np.float64) # global current individual particle positions
fs = np.zeros(iS, dtype=bool) # current combined feasibility for each local particle
p = np.ones((iS,D), dtype=np.float64)*large # local particles individual best positions
fp = np.ones(iS, dtype=np.float64)*large # local particles individual best function values
gp = np.ones((S,D), dtype=np.float64)*large # global particles individual best positions
fgp = np.ones(S, dtype=np.float64)*large # global particles individual best function values
# Maximum velocity
vmax = np.abs(ub - lb)
vmin = -vmax
# Initialize particle positions and velocities
v = range012range(v,vmin,vmax)
x = range012range(x,lb,ub)
if (crank == 0) and includex0: x[-1,:] = x0
x = np.where(mask2, x, x02)
# Calculate first objective and constraints for each particle
if processes > 1:
fs = np.array(mp_pool.map(is_feasible, x))
ii = np.where(fs)[0]
if ii.size > 0:
fx[ii] = np.array(mp_pool.map(obj, x[ii,:]))
else:
for i in range(iS):
fs[i] = is_feasible(x[i,:])
if fs[i]:
fx[i] = obj(x[i,:])
# maximise
if maxit: fx *= -1.
# NaN/Inf - ToDo: Check
large = max(fp.max(), fx[np.isfinite(fx)].max())
large = 1.1*large if large>0. else 0.9*large
fx = np.where(np.isfinite(fx), fx, large)
# print(1, x[np.where(fs)[0],:])
# Store particles best positions (if constraints are satisfied)
i_update = (fx < fp) & fs
if np.any(i_update):
p[i_update,:] = x[i_update,:].copy()
fp[i_update] = fx[i_update]
# gather local best particles into global best particles
if csize > 1:
comm.Allgather([p, MPI.DOUBLE], [gp, MPI.DOUBLE])
comm.Allgather([fp, MPI.DOUBLE], [fgp, MPI.DOUBLE])
else:
gp = p
fgp = fp
if dodiv:
if csize > 1:
comm.Allgather([x, MPI.DOUBLE], [gx, MPI.DOUBLE])
else:
gx = x
gmask = np.tile(mask1,S).reshape((S,D))
divers = diversity(gx, lb, ub, gmask)
if crank == 0: print(1, divers, fgp.min())
# Iterate until termination criterion met
it = 1
ilocal = 0
nlgood = 0
nltotal = nlocal
delta = 0.1
beta = 0.5
plsmin = 0.1
plsmax = 1.0
if pls is None:
dopls = True
else:
if pls < 0.:
dopls = True
else:
dopls = False
if dopls:
ipls = plsmax
else:
ipls = pls
# save restart
if restartfile1 is not None:
if crank == 0:
rs1, rs2, rs3, rs4, rs5 = np.random.get_state()
else:
rs1, rs2, rs3, rs4, rs5 = 'MT19937', np.array(624, dtype=np.uint), 0, 0, 0.
exec("savez_compressed("+saveargarray+")")
p2 = open(restartfile2, 'w')
exec("print("+saveargint+", file=p2)")
exec("print("+saveargfloat+", file=p2)")
exec("print("+saveargbool+", file=p2)")
exec("print("+saveargstring+", file=p2)")
p2.close()
else: # if no restart
# load restart
p1 = open(restartfile1, 'rb')
pp = np.load(p1)
for i in pp.files: exec(i+" = pp['"+i+"']")
p1.close()
p2 = open(restartfile2, 'r')
for i, pp in enumerate(p2.readline().rstrip().split()): exec(restartint[i]+" = int(pp)")
for i, pp in enumerate(p2.readline().rstrip().split()): exec(restartfloat[i]+" = float(pp)")
for i, pp in enumerate(p2.readline().rstrip().split()): exec(restartbool[i]+" = bool(strtobool(pp))")
for i, pp in enumerate(p2.readline().rstrip().split()): exec(restartstring[i]+" = pp")
p2.close()
if crank == 0:
np.random.set_state((rs1, rs2, rs3, rs4, rs5))
# Partialise objective function
if isinstance(func, (str,list)):
obj = partial(_ext_obj_wrapper, func, lb, ub, mask1,
parameterfile1, parameterwriter, objectivefile1, objectivereader, shell, debug, passrank)
else:
obj = partial(_obj_wrapper, func, arg, kwarg)
# Check for constraint function(s) and partialise them
if f_ieqcons is None:
if ieqcons is None:
if (verbose>=1) and (crank == 0):
print('No constraints given.')
cons = _cons_none_wrapper
else:
if (verbose>=1) and (crank == 0):
print('Converting ieqcons to a single constraint function.')
cons = partial(_cons_ieqcons_wrapper, ieqcons, arg, kwarg)
else:
if (verbose>=1) and (crank == 0):
print('Single constraint function given in f_ieqcons.')
cons = partial(_cons_f_ieqcons_wrapper, f_ieqcons, arg, kwarg)
is_feasible = partial(_is_feasible_wrapper, cons)
if processes > 1:
import multiprocessing
mp_pool = multiprocessing.Pool(processes)
# Iterate swarm
while it < maxn:
# Stop if minimum found
if fgp.min() < minobj:
if (verbose>=1) and (crank == 0): print('minobj found (1).')
break
# Memetic PSO
if (memetic.lower() == 'global') and (it % nmemetic == 0):
ii0 = fgp.argmin() # gbest
xl = gp[ii0,:]
fxl = fgp[ii0]
if crank == 0:
# calc norm in [0,1] space for all parameters
gp01 = range2range01(gp,lb,ub)
xl01 = range2range01(xl,lb,ub)
allnorm = np.array([ np.linalg.norm(gp01[ii,:]-xl01) if ii != ii0 else huge for ii in range(S) ])
# # stepsize is lower or equal to furthest particle - almost no improvement
# iib = allnorm.argmax()
# # stepsize is lower or equal to mean particle distance - quite some steps with improvements
# iib = closest(allnorm, allnorm.mean())
# stepsize is lower or equal to closest particle - lots of steps with improvements
iib = allnorm.argmin()
dx = | np.abs(gp[iib,:]-xl) | numpy.abs |
""" Test the different methods of transforming the longitude coordinates after rolling"""
import numpy as np
# calculate the offset in the same way as in dataset_utils
def calculate_offset(a, bounds):
    """Return the number of array elements to roll `a` by so that the
    requested lower bound becomes the first longitude value.

    `a` is a monotonically increasing longitude array with constant spacing;
    `bounds` is a ``(low, high)`` tuple of the requested longitude range.
    """
    lower = bounds[0]
    # grid spacing of the coordinate array
    step = a[1] - a[0]
    # elements per degree of longitude
    cells_per_degree = 1 / step
    # degrees separating the current first element from the requested lower
    # bound, converted to a whole number of grid cells (rounded to nearest)
    return int(round((a[0] - lower) * cells_per_degree))
def dataset_roll(a, offset, bounds):
    """Roll the longitude array by `offset` and remap wrapped values into the
    requested range using an `np.where` condition on the rolled values.

    NOTE(review): the author's inline comment says the negative-offset branch
    is wrong for some inputs; see `dataset_roll_using_offset` for the
    index-based alternative. Keep both until the failing cases are resolved.
    """
    # roll the dataset
    low, high = bounds
    a_roll = np.roll(a, offset)
    if offset < 0:
        # values that wrapped to the end are shifted up by 360 via the modulo
        a_new = np.where(
            np.logical_and(low >= a_roll, a_roll <= -(360 + low)), a_roll, a_roll % 360
        )  # this doesn't work in all negative offset cases
    else:
        # values that wrapped to the front are shifted down by 360 (mod -360)
        a_new = np.where(a_roll < (360 + low), a_roll, a_roll % -360)
    return a_new
def dataset_roll_using_offset(a, offset):
    """Roll the longitude array by `offset` and remap exactly the wrapped
    slice into the target range, addressing it by index rather than value."""
    rolled = np.roll(a, offset)
    if offset < 0:
        # the last |offset| entries wrapped around; lift them into [0, 360)
        rolled[offset:] %= 360
    else:
        # the first `offset` entries wrapped around; drop them into (-360, 0]
        rolled[:offset] %= -360
    return rolled
class TestLonRoll_0_360:
# offset = 359
def test_to_minus_359_0(self):
a = np.arange(start=0, stop=360, step=1)
bounds = (-359, 0)
offset = calculate_offset(a, bounds)
a_where = dataset_roll(a, offset, bounds)
assert np.array_equal(a_where, np.arange(start=-359, stop=1, step=1))
a_offset = dataset_roll_using_offset(a, offset)
assert np.array_equal(a_offset, np.arange(start=-359, stop=1, step=1))
# offset = 270
def test_to_minus_270_to_89(self):
a = np.arange(start=0, stop=360, step=1)
bounds = (-270, 89)
offset = calculate_offset(a, bounds)
a_where = dataset_roll(a, offset, bounds)
assert np.array_equal(a_where, np.arange(start=-270, stop=90, step=1))
a_offset = dataset_roll_using_offset(a, offset)
assert np.array_equal(a_offset, np.arange(start=-270, stop=90, step=1))
# offset = 180
def test_to_minus_180_179(self):
a = np.arange(start=0, stop=360, step=1)
bounds = (-180, 179)
offset = calculate_offset(a, bounds)
a_where = dataset_roll(a, offset, bounds)
assert np.array_equal(a_where, np.arange(start=-180, stop=180, step=1))
a_offset = dataset_roll_using_offset(a, offset)
assert np.array_equal(a_offset, np.arange(start=-180, stop=180, step=1))
# offset = 90
def test_to_minus_90_269(self):
a = np.arange(start=0, stop=360, step=1)
bounds = (-90, 269)
offset = calculate_offset(a, bounds)
a_where = dataset_roll(a, offset, bounds)
assert np.array_equal(a_where, np.arange(start=-90, stop=270, step=1))
a_offset = dataset_roll_using_offset(a, offset)
assert np.array_equal(a_offset, np.arange(start=-90, stop=270, step=1))
# offset = 0
    def test_to_0_359(self):
        """Identity case: requesting the native [0, 359] range rolls by 0."""
        a = np.arange(start=0, stop=360, step=1)
        bounds = (0, 359)
        offset = calculate_offset(a, bounds)
        a_where = dataset_roll(a, offset, bounds)
        # the data should come back unchanged
        assert np.array_equal(a_where, np.arange(start=0, stop=360, step=1))
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from linearmodels.iv.data import IVData
try:
import xarray as xr
MISSING_XARRAY = False
except ImportError:
MISSING_XARRAY = True
def test_numpy_2d() -> None:
    """A plain 2-d ndarray gets generated row/column labels."""
    arr = np.empty((10, 2))
    data = IVData(arr)
    assert data.shape == (10, 2)
    assert data.ndim == arr.ndim
    assert data.cols == ["x.0", "x.1"]
    assert data.rows == list(np.arange(10))
    assert data.labels == {0: data.rows, 1: data.cols}
    assert_equal(data.ndarray, arr)
    expected = pd.DataFrame(arr, columns=data.cols, index=data.rows)
    assert_frame_equal(data.pandas, expected)
def test_numpy_1d() -> None:
    """A 1-d ndarray is promoted to a single-column 2-d container."""
    arr = np.empty(10)
    data = IVData(arr)
    assert data.ndim == 2
    assert data.shape == (10, 1)
    assert data.cols == ["x"]
    assert data.rows == list(np.arange(10))
    assert_equal(data.ndarray, arr[:, None])
    expected = pd.DataFrame(arr[:, None], columns=data.cols, index=data.rows)
    assert_frame_equal(data.pandas, expected)
def test_pandas_df_numeric() -> None:
    """A numeric DataFrame keeps its own column names and index."""
    values = np.empty((10, 2))
    dates = pd.date_range("2017-01-01", periods=10)
    frame = pd.DataFrame(values, columns=["a", "b"], index=dates)
    data = IVData(frame)
    assert data.ndim == 2
    assert data.shape == (10, 2)
    assert data.cols == list(frame.columns)
    assert data.rows == list(frame.index)
    assert_equal(data.ndarray, values)
    expected = pd.DataFrame(values, columns=data.cols, index=data.rows).asfreq("D")
    assert_frame_equal(data.pandas, expected)
def test_pandas_series_numeric() -> None:
    """A numeric Series becomes one column named after the series."""
    values = np.empty(10)
    dates = pd.date_range("2017-01-01", periods=10)
    series = pd.Series(values, name="charlie", index=dates)
    data = IVData(series)
    assert data.ndim == 2
    assert data.shape == (10, 1)
    assert data.cols == [series.name]
    assert data.rows == list(series.index)
    assert_equal(data.ndarray, values[:, None])
    expected = pd.DataFrame(values[:, None], columns=data.cols, index=data.rows).asfreq("D")
    assert_frame_equal(data.pandas, expected)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_1d() -> None:
    """1-d DataArrays are supported, with and without a time coordinate."""
    values = np.random.randn(10)
    # unlabeled array: rows and column names are generated
    handler = IVData(xr.DataArray(values), "some_variable")
    assert handler.rows == list(np.arange(10))
    assert handler.cols == ["some_variable.0"]
    assert_equal(handler.ndarray, values[:, None])
    frame = pd.DataFrame(values, columns=handler.cols, index=handler.rows)
    assert_frame_equal(frame, handler.pandas)
    # labeled array: the time coordinate supplies the row labels
    dates = pd.date_range("2017-01-01", periods=10)
    handler = IVData(xr.DataArray(values, [("time", dates)]), "some_variable")
    assert_series_equal(pd.Series(handler.rows), pd.Series(list(dates)))
    assert handler.cols == ["some_variable.0"]
    assert_equal(handler.ndarray, values[:, None])
    frame = pd.DataFrame(values[:, None], columns=handler.cols, index=handler.rows)
    assert_frame_equal(frame, handler.pandas)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_2d() -> None:
    """2-d DataArrays are supported, with and without named coordinates."""
    values = np.random.randn(10, 2)
    # unlabeled array: rows and column names are generated
    handler = IVData(xr.DataArray(values))
    assert handler.rows == list(np.arange(10))
    assert handler.cols == ["x.0", "x.1"]
    assert_equal(handler.ndarray, values)
    frame = pd.DataFrame(values, columns=handler.cols, index=handler.rows)
    assert_frame_equal(frame, handler.pandas)
    # labeled array: both coordinates supply labels
    dates = pd.date_range("2017-01-01", periods=10)
    labeled = xr.DataArray(values, [("time", dates), ("variables", ["apple", "banana"])])
    handler = IVData(labeled)
    assert_series_equal(pd.Series(handler.rows), pd.Series(list(dates)))
    assert handler.cols == ["apple", "banana"]
    assert_equal(handler.ndarray, values)
    frame = pd.DataFrame(values, columns=handler.cols, index=handler.rows)
    assert_frame_equal(frame, handler.pandas)
def test_invalid_types() -> None:
    """Arrays with more than two dimensions and unknown objects are rejected."""
    for bad_shape in ((1, 1, 1), (10, 2, 2)):
        with pytest.raises(ValueError):
            IVData(np.empty(bad_shape))

    with pytest.raises(TypeError):

        class AnotherClass(object):
            _ndim = 2

            @property
            def ndim(self) -> int:
                return self._ndim

        IVData(AnotherClass())
def test_string_cat_equiv() -> None:
    """String columns and their categorical equivalents yield identical output."""
    strings = pd.Series(["a", "b", "a", "b", "c", "d", "a", "b"])
    numbers = pd.Series(np.arange(8.0))
    fruit = pd.Series(
        ["apple", "banana", "apple", "banana", "cherry", "date", "apple", "banana"]
    )
    frame = pd.DataFrame({"string": strings, "number": numbers, "other_string": fruit})
    as_category = frame.copy()
    as_category["string"] = as_category["string"].astype("category")
    assert_frame_equal(IVData(frame).pandas, IVData(as_category).pandas)
def test_existing_datahandler() -> None:
    """Constructing IVData from an IVData makes an equivalent, distinct object."""
    values = np.empty((10, 2))
    dates = pd.date_range("2017-01-01", periods=10)
    original = IVData(pd.DataFrame(values, columns=["a", "b"], index=dates))
    rewrapped = IVData(original)
    assert rewrapped is not original
    assert rewrapped.cols == original.cols
    assert rewrapped.rows == original.rows
    assert rewrapped.ndim == original.ndim
    assert_equal(rewrapped.ndarray, original.ndarray)
    assert_frame_equal(rewrapped.pandas, original.pandas)
def test_categorical() -> None:
    """Categorical columns are expanded into dummy columns (first level dropped)."""
    index = pd.date_range("2017-01-01", periods=10)
    cat = pd.Categorical(["a", "b", "a", "b", "a", "a", "b", "c", "c", "a"])
    num = np.empty(10)
    df = pd.DataFrame(dict(cat=cat, num=num), index=index)
    dh = IVData(df)
    assert dh.ndim == 2
    # 3 columns: the numeric one plus one dummy per non-base category level
    assert dh.shape == (10, 3)
    assert sorted(dh.cols) == sorted(["cat.b", "cat.c", "num"])
    assert dh.rows == list(index)
    # numeric column passes through unchanged
    assert_equal(dh.pandas["num"].values, num)
import sys
sys.path.append("../")
from numpy import sin, pi, arange
from appJar import gui
x = | arange(0.0, 3.0, 0.01) | numpy.arange |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# new feature selection for MNIST dataset
# labels (index) as before (no change), see notebook 'data_mnist'
# version data_mnist_comp: max features (150 x 3 = 450)
# the version was extended and used to create data with max features (200 x 3 = 600)
# In[ ]:
import gzip
import numpy as np
import matplotlib.pyplot as plt
import copy
from scipy import ndimage, misc
# Pixel intensity above which a pixel counts as "on" when binarising images.
threshold = 180
# Number of random rotation angles; each yields 3 sampled rows -> 3 features.
num_angles = 230
# In[2]:
# produce a raster (random)
# random seed: inserted only later
np.random.seed(30)
# One row per angle: [rotation angle (deg), 3 image-row indices to sample,
# start column for the 10-pixel window].
raster = np.zeros((num_angles, 5))
raster[:, 0] = np.random.randint(0, 360, num_angles)
raster[:, 1] = np.random.randint(0, 27, num_angles) # choose a row
raster[:, 2] = np.random.randint(0, 27, num_angles)
raster[:, 3] = np.random.randint(0, 27, num_angles)
raster[:, 4] = np.random.randint(0, 18, num_angles) # initial position (column) for cutting out samples of length 10, between 0 and 18
# In[5]:
# READ AND GET FEATURES TRAINING DATA
f = gzip.open('train-images-idx3-ubyte.gz','r')
num_images = 60000 #number of images to read out
image_size = 28 #image size
f.read(16) #related to position of image (skip the IDX header)
buf = f.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = data.reshape(num_images, image_size, image_size, 1)
# res:   binary event raster per image (num_angles*3 rows x 10 time bins)
# res_2: time (bin index) of the first event per row
# res_3: 1 where a row produced an event, else 0
res = np.zeros((num_images, num_angles * 3, 10))
res_2 = np.zeros((num_images, num_angles * 3))
res_3 = np.zeros((num_images, num_angles * 3))
for z in range(num_images):
    image_binary = np.zeros((image_size, image_size))  # NOTE(review): unused below
    image_binary_turned = np.zeros((image_size, image_size))
    store = np.empty((num_angles * 3, 10))
    image = np.asarray(data[z]).squeeze() #python array with 28 x 28 pixel values
    for i, angle in enumerate(raster[:, 0]):
        image_turned = ndimage.rotate(image, angle, reshape=False)
        # binarise each row of the rotated image at `threshold`
        # (the comprehension variable reuses the name `i`; in Python 3 it does
        # not leak into the enclosing scope, so `raster[i, ...]` below is safe)
        for a in range(image_size):
            image_binary_turned[a , :] = [0 if i < threshold else 1 for i in image_turned[a,:]]
        event_rows = np.zeros((3, 10)) # 1 times 10 bins long
        for c, start in enumerate(raster[i, 1:4]):
            #start = raster[i, 1]
            # mark only the FIRST 0->1 transition within the 10-pixel window
            for b in range(10):
                if (image_binary_turned[int(start), (b + int(raster[i, 4]))] < image_binary_turned[int(start), (b + 1 + int(raster[i, 4]))]) and (np.size(np.nonzero(event_rows[c, :])) == 0):
                    event_rows[c, b] = 1
        if i == 0:
            store = event_rows
        if i > 0:
            store = np.concatenate((store, event_rows), axis = 0)
    res[z, :, :] = store
    # convert the binary raster into per-row event times / weights
    events = np.nonzero(store)
    for d in range(np.shape(events)[1]):
        res_2[z, events[0][d]] = events[1][d]
        res_3[z, events[0][d]] = 1
np.save('spikes_all_.txt', res)
np.save('spike_times_all_.txt', res_2)
np.save('spike_weights_all_.txt', res_3)
# In[6]:
# READ AND GET FEATURES TEST DATA
f = gzip.open('t10k-images-idx3-ubyte.gz','r')
image_size = 28 #image size
num_images = 10000 #number of images to read out
f.read(16) #related to position of image
buf = f.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = data.reshape(num_images, image_size, image_size, 1)
res = np.zeros((num_images, num_angles * 3, 10))
res_2 = | np.zeros((num_images, num_angles * 3)) | numpy.zeros |
from __future__ import print_function
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as slinalg
import scipy.linalg as linalg
from scipy.sparse.linalg.eigen.arpack import eigsh
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
import sys
from os import path
import copy
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import random
import tensorflow as tf
# import matplotlib.pyplot as plt
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix to an ``.npz`` archive.

    The archive stores the three CSR component arrays plus the shape, so the
    matrix can be rebuilt later by `load_sparse_csr`.
    """
    np.savez(
        filename,
        data=array.data,
        indices=array.indices,
        indptr=array.indptr,
        shape=array.shape,
    )
def load_sparse_csr(filename):
    """Rebuild a scipy CSR matrix from an archive written by `save_sparse_csr`."""
    archive = np.load(filename)
    components = (archive['data'], archive['indices'], archive['indptr'])
    return sp.csr_matrix(components, shape=archive['shape'])
def parse_index_file(filename):
    """Parse index file.

    Reads one integer per line from `filename` and returns them as a list,
    preserving file order.
    """
    # Use a context manager so the handle is closed deterministically;
    # the previous version left the file open until garbage collection.
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def sample_mask(idx, l):
    """Create a boolean mask of length `l` that is True at positions `idx`."""
    mask = np.zeros(l)
    mask[idx] = 1
    # np.bool was a deprecated alias for the builtin bool and was removed in
    # NumPy 1.24; using bool keeps identical behavior on all NumPy versions.
    return np.array(mask, dtype=bool)
def get_triplet(y_train, train_mask, max_triplets):
    """Build (anchor, positive, negative) index triplets from one-hot labels.

    For every labelled node, pairs it with each other node of the same class
    (positive) and up to `max_triplets` randomly sampled nodes of other
    classes (negative). Returns an array of shape (1, n_triplets, 3).

    NOTE(review): `train_mask` is accepted but never used in this body.
    """
    # print('y_train----',y_train.shape)
    index_nonzero = y_train.nonzero()
    # for i in range(y_train.shape[1]):
    #     label_count.append(index_nonzero[1][[index_nonzero[1]==i]].size)
    label_count = np.sum(y_train, axis=0)  # labelled nodes per class
    all_count = np.sum(label_count)        # total labelled nodes
    # list of [node_index, class_index] pairs, sorted by class
    index_nonzero = np.transpose(np.concatenate((index_nonzero[0][np.newaxis,:], index_nonzero[1]\
        [np.newaxis, :]),axis=0)).tolist()
    index_nonzero = sorted(index_nonzero, key = lambda s: s[1])
    #print(index_nonzero)
    #print(label_count)
    def get_one_triplet(input_index, index_nonzero, label_count, all_count, max_triplets):
        # Build triplets anchored at `input_index` = [node, class];
        # returns 0 (not []) when the anchor's class has no labelled nodes.
        triplet = []
        if label_count[input_index[1]]==0:
            return 0
        else:
            # print('max_triplets', max_triplets)
            # print(all_count)
            # print(label_count[input_index[1]])
            # cap negatives at the number of nodes outside the anchor's class
            n_triplets = min(max_triplets, int(all_count-label_count[input_index[1]]))
            # print('----------')
            for j in range(int(label_count[input_index[1]])-1):
                positives = []
                negatives = []
                for k, (value, label) in enumerate(index_nonzero):
                    # find a positive sample, and if only one sample then choose itself
                    if label == input_index[1] and (value != input_index[0] or label_count[input_index[1]]==1):
                        positives.append(index_nonzero[k])
                    if label != input_index[1]:
                        negatives.append(index_nonzero[k])
                # print('positives' ,positives)
                # print('negatives', negatives)
                # sample without replacement among the other-class nodes
                negatives = random.sample(list(negatives), n_triplets)
                for value, label in negatives:
                    triplet.append([input_index[0], positives[j][0], value])
            return triplet
    triplet = []
    for i, j in enumerate(index_nonzero):
        triple = get_one_triplet(j, index_nonzero, label_count, all_count,max_triplets)
        if triple == 0:
            continue
        else:
            triplet.extend(triple)
    np_triple = np.concatenate(np.array([triplet]), axis = 1)
    return np_triple
def load_data(dataset_str, train_size, validation_size, model_config, shuffle=True):
    """Load a graph dataset and build train/validation/test splits.

    Args:
        dataset_str: dataset name. Either one of the listed .mat feature
            datasets, or a planetoid-style dataset name ('cora', 'citeseer',
            'nell.*', ...).
        train_size: number of labelled examples per class for training.
        validation_size: number of validation examples.
        model_config: dict read for 'feature', 'validate' and 'test_size'.
        shuffle: whether to shuffle node indices before splitting.

    Returns:
        (adj, features, y_train, y_val, y_test, train_mask, val_mask,
        test_mask). Also stores the full label matrix in the module-level
        `all_labels`.
    """
    if dataset_str in ['USPS-Fea', 'CIFAR-Fea', 'Cifar_10000_fea', 'Cifar_R10000_fea', 'MNIST-Fea', 'MNIST-10000', 'MNIST-5000']:
        # .mat datasets carry dense features 'X', graph 'G', integer 'labels'
        data = sio.loadmat('data/{}.mat'.format(dataset_str))
        l = data['labels'].flatten()
        # one-hot encode the integer labels
        labels = np.zeros([l.shape[0],np.max(data['labels'])+1])
        labels[np.arange(l.shape[0]), l.astype(np.int8)] = 1
        features = data['X']
        sample = features[0].copy()  # NOTE(review): unused below
        adj = data['G']
    else:
        # planetoid-style pickled objects
        names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
        objects = []
        for i in range(len(names)):
            with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
                if sys.version_info > (3, 0):
                    objects.append(pkl.load(f, encoding='latin1'))
                else:
                    objects.append(pkl.load(f))
        x, y, tx, ty, allx, ally, graph = tuple(objects)
        adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
        test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
        test_idx_range = np.sort(test_idx_reorder)
        if dataset_str == 'citeseer':
            # Fix citeseer dataset (there are some isolated nodes in the graph)
            # Find isolated nodes, add them as zero-vecs into the right position
            test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - min(test_idx_range), :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - min(test_idx_range), :] = ty
            ty = ty_extended
        features = sp.vstack((allx, tx)).tolil()
        # features = sp.eye(features.shape[0]).tolil()
        # features = sp.lil_matrix(allx)
        labels = np.vstack((ally, ty))
        # labels = np.vstack(ally)
        if dataset_str.startswith('nell'):
            # Find relation nodes, add them as zero-vecs into the right position
            test_idx_range_full = range(allx.shape[0], len(graph))
            isolated_node_idx = np.setdiff1d(test_idx_range_full, test_idx_reorder)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - allx.shape[0], :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - allx.shape[0], :] = ty
            ty = ty_extended
            features = sp.vstack((allx, tx)).tolil()
            features[test_idx_reorder, :] = features[test_idx_range, :]
            labels = np.vstack((ally, ty))
            labels[test_idx_reorder, :] = labels[test_idx_range, :]
            idx_all = np.setdiff1d(range(len(graph)), isolated_node_idx)
            # cache the (expensive) one-hot relation features on disk
            if not os.path.isfile("data/planetoid/{}.features.npz".format(dataset_str)):
                print("Creating feature vectors for relations - this might take a while...")
                features_extended = sp.hstack((features, sp.lil_matrix((features.shape[0], len(isolated_node_idx)))),
                                              dtype=np.int32).todense()
                features_extended[isolated_node_idx, features.shape[1]:] = np.eye(len(isolated_node_idx))
                features = sp.csr_matrix(features_extended, dtype=np.float32)
                print("Done!")
                save_sparse_csr("data/planetoid/{}.features".format(dataset_str), features)
            else:
                features = load_sparse_csr("data/planetoid/{}.features.npz".format(dataset_str))
            adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
        # restore natural row order for the (shuffled) test nodes
        features[test_idx_reorder, :] = features[test_idx_range, :]
        labels[test_idx_reorder, :] = labels[test_idx_range, :]
    features = preprocess_features(features, feature_type=model_config['feature'])
    global all_labels
    all_labels = labels.copy()
    # split the data set
    idx = np.arange(len(labels))
    no_class = labels.shape[1]  # number of class
    # validation_size = validation_size * len(idx) // 100
    # if not hasattr(train_size, '__getitem__'):
    train_size = [train_size for i in range(labels.shape[1])]
    if shuffle:
        np.random.shuffle(idx)
    # greedily pick `train_size[j]` labelled nodes per class j
    idx_train = []
    count = [0 for i in range(no_class)]
    label_each_class = train_size
    next = 0  # NOTE(review): shadows the builtin `next`
    for i in idx:
        if count == label_each_class:
            break
        next += 1
        for j in range(no_class):
            if labels[i, j] and count[j] < label_each_class[j]:
                idx_train.append(i)
                count[j] += 1
    test_size = model_config['test_size']
    if model_config['validate']:
        if test_size:
            assert next+validation_size<len(idx)
        idx_val = idx[next:next+validation_size]
        assert next+validation_size+test_size < len(idx)
        idx_test = idx[-test_size:] if test_size else idx[next+validation_size:]
    else:
        if test_size:
            assert next+test_size<len(idx)
        # without a validation set, val and test share the same indices
        idx_val = idx[-test_size:] if test_size else idx[next:]
        idx_test = idx[-test_size:] if test_size else idx[next:]
    # else:
    #     labels_of_class = [0]
    #     while (np.prod(labels_of_class) == 0):
    #         np.random.shuffle(idx)
    #         idx_train = idx[0:int(len(idx) * train_size // 100)]
    #         labels_of_class = np.sum(labels[idx_train], axis=0)
    #     idx_val = idx[-500 - validation_size:-500]
    #     idx_test = idx[-500:]
    print('labels of each class : ', np.sum(labels[idx_train], axis=0))
    # idx_val = idx[len(idx) * train_size // 100:len(idx) * (train_size // 2 + 50) // 100]
    # idx_test = idx[len(idx) * (train_size // 2 + 50) // 100:len(idx)]
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # zero out the label rows outside each split's mask
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    # else:
    #     idx_test = test_idx_range.tolist()
    #     idx_train = range(len(y))
    #     idx_val = range(len(y), len(y) + 500)
    #
    #     train_mask = sample_mask(idx_train, labels.shape[0])
    #     val_mask = sample_mask(idx_val, labels.shape[0])
    #     test_mask = sample_mask(idx_test, labels.shape[0])
    #
    #     y_train = np.zeros(labels.shape)
    #     y_val = np.zeros(labels.shape)
    #     y_test = np.zeros(labels.shape)
    #     y_train[train_mask, :] = labels[train_mask, :]
    #     y_val[val_mask, :] = labels[val_mask, :]
    #     y_test[test_mask, :] = labels[test_mask, :]
    size_of_each_class = np.sum(labels[idx_train], axis=0)
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation.

    Converts a scipy sparse matrix (or a list of them, converted in place)
    into a `tf.SparseTensorValue` of (coords, values, shape).
    """
    def to_tuple(mx):
        # TensorFlow wants COO-format coordinates
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        coords = np.vstack((mx.row, mx.col)).transpose()
        values = mx.data
        shape = mx.shape
        return tf.SparseTensorValue(coords, values, np.array(shape, dtype=np.int64))
    if isinstance(sparse_mx, list):
        # convert each element in place
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)
    return sparse_mx
def preprocess_features(features, feature_type):
if feature_type == 'bow':
# """Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = | np.power(rowsum, -1) | numpy.power |
# --------------------------------------------------------
# P2ORM: Formulation, Inference & Application
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import torch
import numpy as np
from sklearn.metrics import average_precision_score
import sys
sys.path.append('../..')
from .utility import AverageMeter
from lib.dataset.gen_label_methods import occ_order_pred_to_edge_prob
class MetricsEvaluator(object):
"""object for train/val evaluation with relevant metric"""
    def __init__(self, config, isTrain):
        """Set up running meters for occlusion-edge and (optionally)
        occlusion-order metrics.

        :param config: experiment config; `config.network.task_type` selects
            which metric set is allocated ('occ_order' adds per-direction
            meters), `config.TEST.class_sel_ind` selects evaluated classes.
        :param isTrain: True during training (AP over edges is only computed
            at eval time, see cal_batch_metrics).
        """
        self.config = config
        self.isTrain = isTrain
        # occ edge metrics
        self.mIoU_edge = AverageMeter()
        self.mF1_edge = AverageMeter()
        self.AP_edge = AverageMeter()
        # binary edge/non-edge evaluator; only class 1 (edge) is reported
        self.perf_edge = PixLabelEvaluator(num_class=2, cls_sel_ind=[1])
        self.curr_mIoU_edge = 0.
        self.curr_mF1_edge = 0.
        self.curr_AP_edge = 0.
        # occ order metrics
        if self.config.network.task_type == 'occ_order':
            # one meter/evaluator per pairwise direction:
            # E(ast), S(outh), SE and NE diagonals; 3 classes each
            self.mIoU_E = AverageMeter()
            self.mIoU_S = AverageMeter()
            self.mIoU_SE = AverageMeter()
            self.mIoU_NE = AverageMeter()
            self.mF1_E = AverageMeter()
            self.mF1_S = AverageMeter()
            self.mF1_SE = AverageMeter()
            self.mF1_NE = AverageMeter()
            self.perf_E = PixLabelEvaluator(3, config.TEST.class_sel_ind)
            self.perf_S = PixLabelEvaluator(3, config.TEST.class_sel_ind)
            self.perf_SE = PixLabelEvaluator(3, config.TEST.class_sel_ind)
            self.perf_NE = PixLabelEvaluator(3, config.TEST.class_sel_ind)
            self.curr_mIoU_E = 0.
            self.curr_mIoU_S = 0.
            self.curr_mIoU_SE = 0.
            self.curr_mIoU_NE = 0.
            self.curr_mF1_E = 0.
            self.curr_mF1_S = 0.
            self.curr_mF1_SE = 0.
            self.curr_mF1_NE = 0.
    def cal_batch_metrics(self, net_out, targets):
        """Accumulate metrics for one batch.

        :param net_out: network output; for 'occ_order' the channel axis holds
            3 logits per direction (E, S, then SE, NE with 8-connectivity);
            for 'occ_ori' channel 0 is the occlusion-edge probability.
        :param targets: per-direction label maps; targets[-1] is the binary
            occlusion-edge ground truth used for edge AP.
        """
        if self.config.network.task_type == 'occ_order':
            # cal occ order metrics (3-channel slices per direction)
            self.perf_E.cal_batch(targets[0], net_out[:, 0:3, :, :])
            self.perf_S.cal_batch(targets[1], net_out[:, 3:6, :, :])
            self.curr_mIoU_E, _ = self.perf_E.Mean_IoU()
            self.curr_mIoU_S, _ = self.perf_S.Mean_IoU()
            self.curr_mF1_E, _ = self.perf_E.Mean_F1_Score()
            self.curr_mF1_S, _ = self.perf_S.Mean_F1_Score()
            self.mIoU_E.update(self.curr_mIoU_E.item(), targets[0].size(0))
            self.mIoU_S.update(self.curr_mIoU_S.item(), targets[1].size(0))
            self.mF1_E.update(self.curr_mF1_E.item(), targets[0].size(0))
            self.mF1_S.update(self.curr_mF1_S.item(), targets[1].size(0))
            if self.config.dataset.connectivity == 8:
                # the two diagonal directions only exist with 8-connectivity
                self.perf_SE.cal_batch(targets[2], net_out[:, 6:9, :, :])
                self.perf_NE.cal_batch(targets[3], net_out[:, 9:12, :, :])
                self.curr_mIoU_SE, _ = self.perf_SE.Mean_IoU()
                self.curr_mIoU_NE, _ = self.perf_NE.Mean_IoU()
                self.curr_mF1_SE, _ = self.perf_SE.Mean_F1_Score()
                self.curr_mF1_NE, _ = self.perf_NE.Mean_F1_Score()
                self.mIoU_SE.update(self.curr_mIoU_SE.item(), targets[2].size(0))
                self.mIoU_NE.update(self.curr_mIoU_NE.item(), targets[3].size(0))
                self.mF1_SE.update(self.curr_mF1_SE.item(), targets[2].size(0))
                self.mF1_NE.update(self.curr_mF1_NE.item(), targets[3].size(0))
            # cal occ edge metrics (AP only at eval time; it needs the derived
            # edge probability map, which is relatively expensive)
            if not self.isTrain:
                occ_edge_prob, _ = occ_order_pred_to_edge_prob(net_out, self.config.dataset.connectivity)
                occ_edge_prob_flat = np.array(occ_edge_prob.detach().cpu()).flatten()
                occ_edge_gt_flat = np.array(targets[-1].detach().cpu()).flatten().astype(np.int16)
                self.curr_AP_edge = average_precision_score(occ_edge_gt_flat, occ_edge_prob_flat) * 100  # percentage
                self.AP_edge.update(self.curr_AP_edge, targets[-1].size(0))
        elif self.config.network.task_type == 'occ_ori':
            # cal occ edge metrics: threshold the predicted probability at 0.5
            occ_edge_prob = net_out[:, 0, :, :].unsqueeze(dim=1)  # N,1,H,W
            occ_edge_hard = (occ_edge_prob > 0.5).float()
            occ_edge_gt = targets[-1]  # N,H,W
            self.perf_edge.cal_batch(occ_edge_gt, occ_edge_hard)
            self.curr_mIoU_edge, _ = self.perf_edge.Mean_IoU()
            self.curr_mF1_edge, _ = self.perf_edge.Mean_F1_Score()
            self.mIoU_edge.update(self.curr_mIoU_edge, targets[-1].size(0))
            self.mF1_edge.update(self.curr_mF1_edge, targets[-1].size(0))
            if not self.isTrain:
                occ_edge_gt_flat = np.array(targets[-1].detach().cpu()).flatten().astype(np.int16)
                occ_edge_prob_flat = np.array(occ_edge_prob.detach().cpu()).flatten()
                self.curr_AP_edge = average_precision_score(occ_edge_gt_flat, occ_edge_prob_flat) * 100  # percentage
                self.AP_edge.update(self.curr_AP_edge, targets[-1].size(0))
def cal_set_metrics(self, isTest=False):
"""cal avg perf over whole train/val set"""
if self.config.network.task_type == 'occ_order':
if self.config.dataset.connectivity == 4:
self.mIoU_all = (self.mIoU_E.avg + self.mIoU_S.avg) / 2
self.mF1_all = (self.mF1_E.avg + self.mF1_S.avg) / 2
self.mIoUs = [self.mIoU_E.avg, self.mIoU_S.avg]
self.mF1s = [self.mF1_E.avg, self.mF1_S.avg]
if isTest:
self.perf_E.confusion_matrix_curr = self.perf_E.confusion_matrix_all
self.perf_S.confusion_matrix_curr = self.perf_S.confusion_matrix_all
_, prec_E_classes = self.perf_E.Mean_Precision()
_, prec_S_classes = self.perf_S.Mean_Precision()
_, recall_E_classes = self.perf_E.Mean_Recall()
_, recall_S_classes = self.perf_S.Mean_Recall()
self.prec_all = (prec_E_classes + prec_S_classes) / 2
self.recall_all = (recall_E_classes + recall_S_classes) / 2
elif self.config.dataset.connectivity == 8:
self.mIoU_all = (self.mIoU_E.avg + self.mIoU_S.avg +
self.mIoU_SE.avg + self.mIoU_NE.avg) / 4
self.mF1_all = (self.mF1_E.avg + self.mF1_S.avg +
self.mF1_SE.avg + self.mF1_NE.avg) / 4
self.mIoUs = [self.mIoU_E.avg, self.mIoU_S.avg,
self.mIoU_SE.avg, self.mIoU_NE.avg]
self.mF1s = [self.mF1_E.avg, self.mF1_S.avg,
self.mF1_SE.avg, self.mF1_NE.avg]
if isTest:
self.perf_E.confusion_matrix_curr = self.perf_E.confusion_matrix_all
self.perf_S.confusion_matrix_curr = self.perf_S.confusion_matrix_all
self.perf_SE.confusion_matrix_curr = self.perf_SE.confusion_matrix_all
self.perf_NE.confusion_matrix_curr = self.perf_NE.confusion_matrix_all
_, prec_E_classes = self.perf_E.Mean_Precision()
_, prec_S_classes = self.perf_S.Mean_Precision()
_, prec_SE_classes = self.perf_SE.Mean_Precision()
_, prec_NE_classes = self.perf_NE.Mean_Precision()
_, recall_E_classes = self.perf_E.Mean_Recall()
_, recall_S_classes = self.perf_S.Mean_Recall()
_, recall_SE_classes = self.perf_SE.Mean_Recall()
_, recall_NE_classes = self.perf_NE.Mean_Recall()
self.prec_all = (prec_E_classes + prec_S_classes +
prec_SE_classes + prec_NE_classes) / 4
self.recall_all = (recall_E_classes + recall_S_classes +
recall_SE_classes + recall_NE_classes) / 4
elif self.config.network.task_type == 'occ_ori':
self.mIoU_all = self.mIoU_edge.avg
self.mF1_all = self.mF1_edge.avg
self.mIoUs = [self.mIoU_edge.avg]
self.mF1s = [self.mF1_edge.avg]
class PixLabelEvaluator(object):
"""
pixel-wise labeling evaluator
derived from https://github.com/jfzhang95/pytorch-deeplab-xception/blob/master/utils/metrics.py
"""
def __init__(self, num_class, cls_sel_ind):
self.num_class = num_class
self.cls_sel_ind = cls_sel_ind
self.confusion_matrix_curr = np.zeros((self.num_class,) * 2)
self.confusion_matrix_all = | np.zeros((self.num_class,) * 2) | numpy.zeros |
import os
from collections import defaultdict
import numpy as np
import copy
import pickle
import scipy.sparse
from PIL import Image
import h5py, json
import torch
from pycocotools.coco import COCO
from torch.utils.data import Dataset
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.utils.box import bbox_overlaps
class vg_hdf5(Dataset):
    """Visual Genome scene-graph dataset read from HDF5 files.

    Boxes/classes/relations come from VG-SGG.h5, the raw images from
    imdb_1024.h5, and the label dictionaries from VG-SGG-dicts.json.
    """

    def __init__(self, cfg, split="train", transforms=None, num_im=-1, num_val_im=0,
                 filter_duplicate_rels=True, filter_non_overlap=True, filter_empty_rels=True):
        """Index the chosen split and load its ground-truth graphs into memory.

        Args:
            cfg: config object; only cfg.DATASET.PATH is read here.
            split: 'train' or 'test' (only these two are accepted).
            transforms: callable (img, target) -> (img, target) applied in __getitem__.
            num_im: keep only the first num_im images of the split (-1 keeps all).
            num_val_im: number of images held out for validation (see load_graphs).
            filter_duplicate_rels: keep one predicate per (subj, obj) pair (train only).
            filter_non_overlap: drop train images whose related boxes never overlap.
            filter_empty_rels: drop images without any relation.
        """
        # NOTE(review): both assert messages are stale -- 'val' is rejected
        # and num_im == -1 is allowed.
        assert split == "train" or split == "test", "split must be one of [train, val, test]"
        assert num_im >= -1, "the number of samples must be >= 0"
        # split = 'train' if split == 'test' else 'test'
        self.data_dir = cfg.DATASET.PATH
        self.transforms = transforms
        self.split = split
        self.filter_non_overlap = filter_non_overlap
        # Duplicate-relation filtering is applied on the training split only.
        self.filter_duplicate_rels = filter_duplicate_rels and self.split == 'train'
        self.roidb_file = os.path.join(self.data_dir, "VG-SGG.h5")
        self.image_file = os.path.join(self.data_dir, "imdb_1024.h5")
        # read in dataset from a h5 file and a dict (json) file
        assert os.path.exists(self.data_dir), \
            "cannot find folder {}, please download the visual genome data into this folder".format(self.data_dir)
        self.im_h5 = h5py.File(self.image_file, 'r')
        self.info = json.load(open(os.path.join(self.data_dir, "VG-SGG-dicts.json"), 'r'))
        self.im_refs = self.im_h5['images'] # image data reference
        im_scale = self.im_refs.shape[2]  # NOTE(review): unused local; presumably the stored image side (1024) -- confirm
        # add background class
        self.info['label_to_idx']['__background__'] = 0
        self.class_to_ind = self.info['label_to_idx']
        # Class names ordered by their contiguous index (background first).
        self.ind_to_classes = sorted(self.class_to_ind, key=lambda k:
                               self.class_to_ind[k])
        # cfg.ind_to_class = self.ind_to_classes
        self.predicate_to_ind = self.info['predicate_to_idx']
        self.predicate_to_ind['__background__'] = 0
        # Predicate names ordered by their contiguous index (background first).
        self.ind_to_predicates = sorted(self.predicate_to_ind, key=lambda k:
                                  self.predicate_to_ind[k])
        # cfg.ind_to_predicate = self.ind_to_predicates
        # Per-image boxes/classes/relations for this split (see load_graphs below).
        self.split_mask, self.image_index, self.im_sizes, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(
            self.roidb_file, self.image_file,
            self.split, num_im, num_val_im=num_val_im,
            filter_empty_rels=filter_empty_rels,
            filter_non_overlap=filter_non_overlap and split == "train",
        )
        # Class ids are already contiguous here, so the two maps are identity-like.
        self.json_category_id_to_contiguous_id = self.class_to_ind
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }

    @property
    def coco(self):
        """
        :return: a Coco-like object that we can use to evaluate detection!
        """
        # Flatten the in-memory ground truth into COCO-style annotation records.
        anns = []
        for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
            for cls, box in zip(cls_array.tolist(), box_array.tolist()):
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],  # xyxy -> xywh
                    'category_id': cls,
                    'id': len(anns),
                    'image_id': i,
                    'iscrowd': 0,
                })
        # Populate a COCO object in memory instead of reading an annotation file.
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person',
                            'id': i, 'name': name} for i, name in enumerate(self.ind_to_classes) if name != '__background__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco

    def _im_getter(self, idx):
        """Return image idx of this split as an H x W x C array cropped to its true size."""
        w, h = self.im_sizes[idx, :]
        ridx = self.image_index[idx]
        im = self.im_refs[ridx]
        im = im[:, :h, :w] # crop out
        im = im.transpose((1,2,0)) # c h w -> h w c
        return im

    def __len__(self):
        """Number of images in the split."""
        return len(self.image_index)

    def __getitem__(self, index):
        """
        get dataset item

        Returns (transformed image, BoxList target with 'labels',
        'pred_labels' (dense relation matrix) and 'relation_labels'
        (triplet list) fields, index).
        """
        # get image
        img = Image.fromarray(self._im_getter(index)); width, height = img.size
        # get object bounding boxes, labels and relations
        obj_boxes = self.gt_boxes[index].copy()
        obj_labels = self.gt_classes[index].copy()
        obj_relation_triplets = self.relationships[index].copy()
        if self.filter_duplicate_rels:
            # Filter out dupes!
            assert self.split == 'train'
            old_size = obj_relation_triplets.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in obj_relation_triplets:
                all_rel_sets[(o0, o1)].append(r)
            # Keep one randomly chosen predicate per (subject, object) pair.
            obj_relation_triplets = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
            obj_relation_triplets = np.array(obj_relation_triplets)
        # Dense (num_obj x num_obj) matrix of predicate ids (0 = no relation).
        obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))
        for i in range(obj_relation_triplets.shape[0]):
            subj_id = obj_relation_triplets[i][0]
            obj_id = obj_relation_triplets[i][1]
            pred = obj_relation_triplets[i][2]
            obj_relations[subj_id, obj_id] = pred
        target_raw = BoxList(obj_boxes, (width, height), mode="xyxy")
        img, target = self.transforms(img, target_raw)
        target.add_field("labels", torch.from_numpy(obj_labels))
        target.add_field("pred_labels", torch.from_numpy(obj_relations))
        target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
        target = target.clip_to_image(remove_empty=False)
        return img, target, index

    def get_groundtruth(self, index):
        """Return the untransformed ground-truth BoxList for image *index*.

        Mirrors __getitem__ but skips image loading/transforms and adds a
        zero-filled 'difficult' field.
        """
        width, height = self.im_sizes[index, :]
        # get object bounding boxes, labels and relations
        obj_boxes = self.gt_boxes[index].copy()
        obj_labels = self.gt_classes[index].copy()
        obj_relation_triplets = self.relationships[index].copy()
        if self.filter_duplicate_rels:
            # Filter out dupes!
            assert self.split == 'train'
            old_size = obj_relation_triplets.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in obj_relation_triplets:
                all_rel_sets[(o0, o1)].append(r)
            obj_relation_triplets = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
            obj_relation_triplets = np.array(obj_relation_triplets)
        # Dense (num_obj x num_obj) matrix of predicate ids (0 = no relation).
        obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))
        for i in range(obj_relation_triplets.shape[0]):
            subj_id = obj_relation_triplets[i][0]
            obj_id = obj_relation_triplets[i][1]
            pred = obj_relation_triplets[i][2]
            obj_relations[subj_id, obj_id] = pred
        target = BoxList(obj_boxes, (width, height), mode="xyxy")
        target.add_field("labels", torch.from_numpy(obj_labels))
        target.add_field("pred_labels", torch.from_numpy(obj_relations))
        target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
        target.add_field("difficult", torch.from_numpy(obj_labels).clone().fill_(0))
        return target

    def get_img_info(self, img_id):
        """Return {'height': h, 'width': w} for image *img_id*."""
        w, h = self.im_sizes[img_id, :]
        return {"height": h, "width": w}

    def map_class_id_to_class_name(self, class_id):
        """Translate a contiguous class id back to its readable name."""
        return self.ind_to_classes[class_id]
def load_graphs(graphs_file, images_file, mode='train', num_im=-1, num_val_im=0, filter_empty_rels=True,
filter_non_overlap=False):
"""
Load the file containing the GT boxes and relations, as well as the dataset split
:param graphs_file: HDF5
:param mode: (train, val, or test)
:param num_im: Number of images we want
:param num_val_im: Number of validation images
:param filter_empty_rels: (will be filtered otherwise.)
:param filter_non_overlap: If training, filter images that dont overlap.
:return: image_index: numpy array corresponding to the index of images we're using
boxes: List where each element is a [num_gt, 4] array of ground
truth boxes (x1, y1, x2, y2)
gt_classes: List where each element is a [num_gt] array of classes
relationships: List where each element is a [num_r, 3] array of
(box_ind_1, box_ind_2, predicate) relationships
"""
if mode not in ('train', 'val', 'test'):
raise ValueError('{} invalid'.format(mode))
roi_h5 = h5py.File(graphs_file, 'r')
im_h5 = h5py.File(images_file, 'r')
data_split = roi_h5['split'][:]
split = 2 if mode == 'test' else 0
split_mask = data_split == split
# Filter out images without bounding boxes
split_mask &= roi_h5['img_to_first_box'][:] >= 0
if filter_empty_rels:
split_mask &= roi_h5['img_to_first_rel'][:] >= 0
image_index = np.where(split_mask)[0]
if num_im > -1:
image_index = image_index[:num_im]
if num_val_im > 0:
if mode == 'val':
image_index = image_index[:num_val_im]
elif mode == 'train':
image_index = image_index[num_val_im:]
split_mask = np.zeros_like(data_split).astype(bool)
split_mask[image_index] = True
# Get box information
all_labels = roi_h5['labels'][:, 0]
all_boxes = roi_h5['boxes_{}'.format(1024)][:] # will index later
assert np.all(all_boxes[:, :2] >= 0) # sanity check
assert np.all(all_boxes[:, 2:] > 0) # no empty box
# convert from xc, yc, w, h to x1, y1, x2, y2
all_boxes[:, :2] = all_boxes[:, :2] - all_boxes[:, 2:] / 2
all_boxes[:, 2:] = all_boxes[:, :2] + all_boxes[:, 2:]
im_to_first_box = roi_h5['img_to_first_box'][split_mask]
im_to_last_box = roi_h5['img_to_last_box'][split_mask]
im_to_first_rel = roi_h5['img_to_first_rel'][split_mask]
im_to_last_rel = roi_h5['img_to_last_rel'][split_mask]
im_widths = im_h5["image_widths"][split_mask]
im_heights = im_h5["image_heights"][split_mask]
# load relation labels
_relations = roi_h5['relationships'][:]
_relation_predicates = roi_h5['predicates'][:, 0]
assert (im_to_first_rel.shape[0] == im_to_last_rel.shape[0])
assert (_relations.shape[0] == _relation_predicates.shape[0]) # sanity check
# Get everything by image.
im_sizes = []
image_index_valid = []
boxes = []
gt_classes = []
relationships = []
for i in range(len(image_index)):
boxes_i = all_boxes[im_to_first_box[i]:im_to_last_box[i] + 1, :]
gt_classes_i = all_labels[im_to_first_box[i]:im_to_last_box[i] + 1]
if im_to_first_rel[i] >= 0:
predicates = _relation_predicates[im_to_first_rel[i]:im_to_last_rel[i] + 1]
obj_idx = _relations[im_to_first_rel[i]:im_to_last_rel[i] + 1] - im_to_first_box[i]
assert np.all(obj_idx >= 0)
assert np.all(obj_idx < boxes_i.shape[0])
rels = np.column_stack((obj_idx, predicates))
else:
assert not filter_empty_rels
rels = | np.zeros((0, 3), dtype=np.int32) | numpy.zeros |
import numpy as np
from pyfmmlib2d import SFMM
def stokes_kernel(sx, tx):
    """Dense 2D stokeslet (single-layer Stokes) velocity matrices.

    sx, tx: 2 x ns / 2 x nt arrays of source / target coordinates.
    Returns (u_mat, v_mat), each nt x 2*ns, mapping the stacked force
    vector [fx; fy] to the x / y velocity components at the targets.
    """
    dx = tx[0][:, None] - sx[0]
    dy = tx[1][:, None] - sx[1]
    r2 = dx**2 + dy**2
    logr = np.log(np.sqrt(r2))
    c = 0.25 / np.pi
    # Stokeslet tensor G_ij = -log(r) * delta_ij + d_i d_j / r^2.
    Gxx = c * (-logr + dx * dx / r2)
    Gxy = c * (dx * dy / r2)
    Gyy = c * (-logr + dy * dy / r2)
    # Column blocks: [fx-block | fy-block].
    u_mat = np.hstack([Gxx, Gxy])
    v_mat = np.hstack([Gxy, Gyy])
    return u_mat, v_mat
def stokes_kernel_stress(sx, tx):
    """Dense matrices mapping stacked forces [fx; fy] at the sources to the
    stress components (sigma_xx, sigma_xy, sigma_yy) at the targets.

    sx, tx: 2 x ns / 2 x nt arrays of source / target coordinates.
    Each returned matrix is nt x 2*ns with column blocks [fx | fy].
    """
    dx = tx[0][:, None] - sx[0]
    dy = tx[1][:, None] - sx[1]
    r2 = dx**2 + dy**2
    r4 = r2 * r2
    c = 0.25 / np.pi
    # Velocity-gradient kernels of the stokeslet.
    ux = np.hstack([c * (dx * dy**2 - dx**3) / r4,
                    c * (dy**3 - dx**2 * dy) / r4])
    uy = np.hstack([c * (-3 * dy * dx**2 - dy**3) / r4,
                    c * (dx**3 - dx * dy**2) / r4])
    vx = np.hstack([c * (dy**3 - dy * dx**2) / r4,
                    c * (-3 * dx * dy**2 - dx**3) / r4])
    vy = np.hstack([c * (dx**3 - dy**2 * dx) / r4,
                    c * (dy * dx**2 - dy**3) / r4])
    # Pressure kernel.
    cp = 0.5 / np.pi
    p = np.hstack([cp * dx / r2, cp * dy / r2])
    # sigma = -p*I + (grad u + grad u^T)
    return 2 * ux - p, uy + vx, 2 * vy - p
class periodized_stokes_fmm(object):
def __init__(self, bounds, p=16, eps=1e-14):
"""
Class to execute periodized Stokes FMM
bounds: [xmin, xmax, ymin, ymax] (location of periodic box)
p: order of expansion to use on periodic walls
"""
self.bounds = bounds
self.p = p
# compute the location of the collocation nodes
nodes, weights = np.polynomial.legendre.leggauss(p)
nodes = nodes * 0.5 + 0.5
ranx = bounds[1] - bounds[0]
rany = bounds[3] - bounds[2]
if np.abs(ranx - rany) > 1e-15:
raise Exception('For now, periodization bounds must be a square.')
self.width = ranx
self.weights = weights*0.5*self.width
nodey = nodes*rany + bounds[2]
rep = lambda x: np.repeat(x, p)
self.node_left = np.row_stack([ rep(bounds[0]), nodey ])
self.node_right = np.row_stack([ rep(bounds[1]), nodey ])
nodex = nodes*ranx + bounds[0]
self.node_bottom = np.row_stack([ nodex, rep(bounds[2]) ])
self.node_top = np.row_stack([ nodex, rep(bounds[3]) ])
self.check = np.column_stack([ self.node_left, self.node_right, \
self.node_bottom, self.node_top ])
# get normals (not outward facing!)
self.normal_left = np.row_stack([ rep(1.0), rep(0.0) ])
self.normal_right = np.row_stack([ rep(1.0), rep(0.0) ])
self.normal_bottom = np.row_stack([ rep(0.0), rep(1.0) ])
self.normal_top = np.row_stack([ rep(0.0), rep(1.0) ])
self.normals = np.column_stack([ self.normal_left, self.normal_right,
self.normal_bottom, self.normal_top ])
# generate sources
self.n_check = 4*p
self.n_sources = self.n_check
self.center = [ 0.5*(self.bounds[0]+self.bounds[1]),
0.5*(self.bounds[2]+self.bounds[3]) ]
radius = 0.5*np.sqrt(2)*self.width
adj = | np.log(eps) | numpy.log |
import numpy as np
from auto_editor.audiotsm2.base import AnalysisSynthesisTSM
from auto_editor.audiotsm2.utils.windows import hanning
class WSOLAConverter():
"""
A Converter implementing the WSOLA (Waveform Similarity-based Overlap-Add)
time-scale modification procedure.
"""
def __init__(self, channels, frame_length, synthesis_hop, tolerance):
self._channels = channels
self._frame_length = frame_length
self._synthesis_hop = synthesis_hop
self._tolerance = tolerance
self._synthesis_frame = np.empty((channels, frame_length))
self._natural_progression = np.empty((channels, frame_length))
self._first = True
    def clear(self):
        """Reset converter state so the next frame is treated as the first."""
        self._first = True
def convert_frame(self, analysis_frame):
for k in range(0, self._channels):
if self._first:
delta = 0
else:
cross_correlation = np.correlate(
analysis_frame[k, :-self._synthesis_hop],
self._natural_progression[k])
delta = | np.argmax(cross_correlation) | numpy.argmax |
"""Film Mode Matching Mode Solver
Implementation of the Film Mode Matching (FMM) algorithm, as described in:
- Sudbo, "Film mode matching: a versatile numerical method for vector mode field calculations in dielectric waveguides", Pure App. Optics, 2 (1993), 211-233
- Sudbo, "Improved formulation of the film mode matching method for mode field calculations in dielectric waveguides", Pure App. Optics, 3 (1994), 381-388
Examples
========
See L{FMM1d} and L{FMM2d}.
"""
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
from functools import reduce
__author__ = '<NAME> & <NAME>'
import numpy
import scipy
import scipy.optimize
import copy
import EMpy.utils
from EMpy.modesolvers.interface import *
import pylab
class Message(object):
    """A log message gated by a verbosity level."""

    def __init__(self, msg, verbosity=0):
        self.msg = msg
        self.verbosity = verbosity

    def show(self, verbosity=0):
        """Print the message, indented by its level, unless its own
        verbosity exceeds the requested *verbosity*."""
        if self.verbosity <= verbosity:
            indent = (self.verbosity - 1) * '\t'
            print(indent + self.msg)
class Struct(object):
    """Empty class to fill with whatever I want. Maybe a dictionary would do?"""
    # NOTE(review): an ad-hoc attribute bag; instances get attributes assigned freely.
    pass
class Boundary(object):
    """Boundary conditions.

    Electric and Magnetic boundary conditions are translated to Symmetric
    and Antisymmetric for each field.

    @ivar xleft: Left bc on x.
    @ivar xright: Right bc on x.
    @ivar yleft: Left bc on y.
    @ivar yright: Right bc on y.
    """

    # Wall type -> (H-field symmetry, E-field symmetry).
    # An electric wall is antisymmetric for H and symmetric for E; a
    # magnetic wall is the opposite (matches the original if/elif ladder).
    _WALL_SYMMETRIES = {
        'Electric Wall': ('A', 'S'),
        'Magnetic Wall': ('S', 'A'),
    }

    def __init__(self, xleft='Electric Wall',
                 yleft='Magnetic Wall',
                 xright='Electric Wall',
                 yright='Magnetic Wall'):
        """Set the boundary conditions, validate and translate."""
        self.xleft = xleft
        self.yleft = yleft
        self.xright = xright
        self.yright = yright
        self.validate()
        self.translate()

    def validate(self):
        """Validate the input.

        @raise ValueError: Unknown boundary.
        """
        walls = (self.xleft, self.yleft, self.xright, self.yright)
        if not all(w in self._WALL_SYMMETRIES for w in walls):
            raise ValueError('Unknown boundary.')

    def translate(self):
        """Translate for each field.

        Builds xh/xe (left+right symmetry along x for the H and E fields)
        and yh/ye (same along y) as two-character 'S'/'A' strings.

        @raise ValueError: Unknown boundary.
        """
        try:
            hxl, exl = self._WALL_SYMMETRIES[self.xleft]
            hxr, exr = self._WALL_SYMMETRIES[self.xright]
            hyl, eyl = self._WALL_SYMMETRIES[self.yleft]
            hyr, eyr = self._WALL_SYMMETRIES[self.yright]
        except KeyError:
            raise ValueError('Unknown boundary.')
        self.xh = hxl + hxr
        self.xe = exl + exr
        self.yh = hyl + hyr
        self.ye = eyl + eyr

    def __str__(self):
        return 'xleft = %s, xright = %s, yleft = %s, yright = %s' % (self.xleft, self.xright, self.yleft, self.yright)
class Slice(object):
    """One dimensional arrangement of layers and 1d modes.

    A slice is made of a stack of layers, i.e. refractive indeces with a thickness,
    with given boundary conditions.
    It holds 1d modes, both TE and TM.

    @ivar x1: start point of the slice in x.
    @ivar x2: end point of the slice in x.
    @ivar Uy: array of points delimiting the layers.
    @ivar boundary: boundary conditions.
    @ivar modie: E modes.
    @ivar modih: H modes.
    @ivar Ux: array of points delimiting the slices in x (internally set).
    @ivar refractiveindex: refractive index of all the slices (internally set).
    @ivar epsilon: epsilon of all the slices (internally set).
    @ivar wl: vacuum wavelength.
    """

    def __init__(self, x1, x2, Uy, boundary, modie, modih):
        # Transverse extent along x.
        self.x1 = x1
        self.x2 = x2
        # Layer interfaces along y and the boundary conditions.
        self.Uy = Uy
        self.boundary = boundary
        # 1d E/H mode sets solved inside this slice.
        self.modie = modie
        self.modih = modih

    def __str__(self):
        return 'x1 = {:g}, x2 = {:g}\nUy = {}\nboundary = {}'.format(
            self.x1, self.x2, self.Uy, self.boundary)
class FMMMode1d(Mode):
    """One dimensional mode.

    Note
    ====
    Virtual class.
    """
    # Abstract base: FMMMode1dx and FMMMode1dy below derive from it.
    pass
class FMMMode1dx(FMMMode1d):
    """Matching coefficients in the x-direction.

    L{FMMMode1dy}s are weighted by these coefficients to assure continuity.
    """

    def __str__(self):
        # One "name = value" line per coefficient array.
        attrs = ('sl', 'sr', 'al', 'ar', 'k', 'U')
        return '\n'.join('%s = %s' % (name, getattr(self, name))
                         for name in attrs)
class FMMMode1dy(FMMMode1d):
    """One dimensional mode.

    It holds the coefficients that describe the mode in the FMM expansion.

    Note
    ====
    The mode is supposed to be one dimensional, in the y direction.

    @ivar sl: array of value of the mode at the lhs of each slice.
    @ivar sr: array of value of the mode at the rhs of each slice.
    @ivar al: array of value of the derivative of the mode at the lhs of each slice.
    @ivar ar: array of value of the derivative of the mode at the rhs of each slice.
    @ivar k: wavevector inside each layer.
    @ivar keff: effective wavevector.
    @ivar zero: how good the mode is? it must be as close to zero as possible!
    @ivar Uy: array of points delimiting the layers.
    """

    def eval(self, y_):
        """Evaluate the mode at y."""
        y = numpy.atleast_1d(y_)
        ny = len(y)
        f = numpy.zeros(ny, dtype=complex)
        # Piecewise evaluation: layer iU spans [U[iU], U[iU+1]] and carries
        # its own wavevector k and (value, derivative) coefficients (sl, al).
        for iU in range(len(self.U) - 1):
            k = self.k[iU]
            sl = self.sl[iU]
            al = self.al[iU]
            Ul = self.U[iU]
            Ur = self.U[iU+1]
            idx = numpy.where((Ul <= y) & (y <= Ur))
            yy = y[idx] - Ul
            # sinxsux presumably implements sin(x)/x (helper defined
            # elsewhere in this module) -- confirm.
            f[idx] = sl * numpy.cos(k * yy) + al * sinxsux(k * yy) * yy
        return f

    def plot(self, y):
        """Plot the real and imaginary parts of the mode profile at y."""
        f = self.eval(y)
        # BUGFIX: the imaginary trace used to plot numpy.imag(y), which is
        # identically zero for real coordinates; plot the imaginary part of
        # the field profile instead, matching the ('real', 'imag') legend.
        pylab.plot(y, numpy.real(f), y, numpy.imag(f))
        pylab.legend(('real', 'imag'))
        pylab.xlabel('y')
        pylab.ylabel('mode1d')
        pylab.show()

    def __str__(self):
        return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nkeff = %s\nzero = %s\nU = %s' % \
               (self.sl.__str__(),
                self.sr.__str__(),
                self.al.__str__(),
                self.ar.__str__(),
                self.k.__str__(),
                self.keff.__str__(),
                self.zero.__str__(),
                self.U.__str__())
class FMMMode2d(Mode):
"""Two dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
"""
def get_x(self, n=100):
return numpy.linspace(self.slicesx[0].Ux[0], self.slicesx[0].Ux[-1], n)
def get_y(self, n=100):
return numpy.linspace(self.slicesx[0].Uy[0], self.slicesx[0].Uy[-1], n)
    def eval(self, x_=None, y_=None):
        """Evaluate the mode at x,y.

        Returns the 12-tuple (ExTE, ExTM, EyTE, EyTM, EzTE, EzTM,
        cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM): TE and TM parts of each
        E / cB field component sampled on the (y, x) grid.  When x_ or y_
        is None the default grids from get_x()/get_y() are used.
        """
        if x_ is None:
            x = self.get_x()
        else:
            x = numpy.atleast_1d(x_)
        if y_ is None:
            y = self.get_y()
        else:
            y = numpy.atleast_1d(y_)
        nmodi = len(self.modie)
        lenx = len(x)
        leny = len(y)
        # Vacuum wavenumber and modal propagation constant.
        k0 = 2. * numpy.pi / self.slicesx[0].wl
        kz = self.keff
        # x-dependence of each 1d mode (u) and its x-derivative (udot),
        # for the H (h) and E (e) polarizations.
        uh = numpy.zeros((nmodi, lenx), dtype=complex)
        ue = numpy.zeros_like(uh)
        udoth = numpy.zeros_like(uh)
        udote = numpy.zeros_like(uh)
        # y-dependent field templates per mode: *s* entries multiply u,
        # *a* entries multiply udot (see Sudbo's FMM papers).
        Exsh = numpy.zeros((leny, nmodi), dtype=complex)
        Exah = numpy.zeros_like(Exsh)
        Exse = numpy.zeros_like(Exsh)
        Exae = numpy.zeros_like(Exsh)
        Eysh = numpy.zeros_like(Exsh)
        Eyah = numpy.zeros_like(Exsh)
        Eyse = numpy.zeros_like(Exsh)
        Eyae = numpy.zeros_like(Exsh)
        Ezsh = numpy.zeros_like(Exsh)
        Ezah = numpy.zeros_like(Exsh)
        Ezse = numpy.zeros_like(Exsh)
        Ezae = numpy.zeros_like(Exsh)
        cBxsh = numpy.zeros_like(Exsh)
        cBxah = numpy.zeros_like(Exsh)
        cBxse = numpy.zeros_like(Exsh)
        cBxae = numpy.zeros_like(Exsh)
        cBysh = numpy.zeros_like(Exsh)
        cByah = numpy.zeros_like(Exsh)
        cByse = numpy.zeros_like(Exsh)
        cByae = numpy.zeros_like(Exsh)
        cBzsh = numpy.zeros_like(Exsh)
        cBzah = numpy.zeros_like(Exsh)
        cBzse = numpy.zeros_like(Exsh)
        cBzae = numpy.zeros_like(Exsh)
        # Output grids, one (y, x) array per field component and polarization.
        ExTE = numpy.zeros((leny,lenx), dtype=complex)
        EyTE = numpy.zeros_like(ExTE)
        EzTE = numpy.zeros_like(ExTE)
        ExTM = numpy.zeros_like(ExTE)
        EyTM = numpy.zeros_like(ExTE)
        EzTM = numpy.zeros_like(ExTE)
        cBxTE = numpy.zeros_like(ExTE)
        cByTE = numpy.zeros_like(ExTE)
        cBzTE = numpy.zeros_like(ExTE)
        cBxTM = numpy.zeros_like(ExTE)
        cByTM = numpy.zeros_like(ExTE)
        cBzTM = numpy.zeros_like(ExTE)
        for mx, slice in enumerate(self.slicesx):
            # idx selects the x samples falling inside this slice;
            # x1/x2 are the distances from the right/left slice wall.
            idx = numpy.where((slice.x1 <= x) & (x < slice.x2))
            x2 = x[idx] - slice.x1
            x1 = slice.x2 - x[idx]
            dx = slice.x2 - slice.x1
            for n in range(nmodi):
                # 1d y-profiles of mode n in this slice; dot/sueps are
                # module helpers (derivative / division by epsilon,
                # presumably -- defined elsewhere in this file).
                fi = slice.modih[n].eval(y)
                fidot = dot(slice.modih[n]).eval(y)
                psi = slice.modie[n].eval(y)
                psisueps = sueps(slice.modie[n]).eval(y)
                psidotsueps = sueps(dot(slice.modie[n])).eval(y)
                # Interpolate the x-dependence of the H-mode between the
                # slice walls (sine expansion with transverse kxh).
                kfh = self.modih[n].k[mx]
                kxh = scipy.sqrt(kfh**2 - kz**2)
                sl = self.modih[n].sl[mx] * (k0/kfh)**2
                al = self.modih[n].al[mx]
                sr = self.modih[n].sr[mx] * (k0/kfh)**2
                ar = self.modih[n].ar[mx]
                uh[n,idx] = (numpy.sin(kxh * x1) * sl + numpy.sin(kxh * x2) * sr) / numpy.sin(kxh * dx)
                udoth[n,idx] = (numpy.sin(kxh * x1) * al + numpy.sin(kxh * x2) * ar) / numpy.sin(kxh * dx)
                # Same for the E-mode.
                kfe = self.modie[n].k[mx]
                kxe = scipy.sqrt(kfe**2 - kz**2)
                sl = self.modie[n].sl[mx] * (k0/kfe)**2
                al = self.modie[n].al[mx]
                sr = self.modie[n].sr[mx] * (k0/kfe)**2
                ar = self.modie[n].ar[mx]
                ue[n,idx] = (numpy.sin(kxe * x1) * sl + numpy.sin(kxe * x2) * sr) / numpy.sin(kxe * dx)
                udote[n,idx] = (numpy.sin(kxe * x1) * al + numpy.sin(kxe * x2) * ar) / numpy.sin(kxe * dx)
                # Field templates along y for mode n.
                Exsh[:,n] = (kz/k0) * fi
                Exah[:,n] = 0
                Exse[:,n] = 0
                Exae[:,n] = -psidotsueps / k0**2
                Eysh[:,n] = 0
                Eyah[:,n] = 0
                Eyse[:,n] = -(kfe/k0)**2 * psisueps
                Eyae[:,n] = 0
                Ezsh[:,n] = 0
                Ezah[:,n] = -1j * fi / k0
                Ezse[:,n] = 1j * kz / k0**2 * psidotsueps
                Ezae[:,n] = 0
                cBxsh[:,n] = 0
                cBxah[:,n] = fidot / k0**2
                cBxse[:,n] = kz / k0 * psi
                cBxae[:,n] = 0
                cBysh[:,n] = (kfh/k0)**2 * fi
                cByah[:,n] = 0
                cByse[:,n] = 0
                cByae[:,n] = 0
                cBzsh[:,n] = -1j * kz / k0**2 * fidot
                cBzah[:,n] = 0
                cBzse[:,n] = 0
                cBzae[:,n] = -1j * psi / k0
            # Assemble the slice's fields: sum over modes of
            # template(y) x coefficient(x), via tensordot.
            ExTE[:,idx] = numpy.tensordot(Exsh, uh[:,idx], axes=1) + numpy.tensordot(Exah, udoth[:,idx], axes=1)
            ExTM[:,idx] = numpy.tensordot(Exse, ue[:,idx], axes=1) + numpy.tensordot(Exae, udote[:,idx], axes=1)
            EyTE[:,idx] = numpy.tensordot(Eysh, uh[:,idx], axes=1) + numpy.tensordot(Eyah, udoth[:,idx], axes=1)
            EyTM[:,idx] = numpy.tensordot(Eyse, ue[:,idx], axes=1) + numpy.tensordot(Eyae, udote[:,idx], axes=1)
            EzTE[:,idx] = numpy.tensordot(Ezsh, uh[:,idx], axes=1) + numpy.tensordot(Ezah, udoth[:,idx], axes=1)
            EzTM[:,idx] = numpy.tensordot(Ezse, ue[:,idx], axes=1) + numpy.tensordot(Ezae, udote[:,idx], axes=1)
            cBxTE[:,idx] = numpy.tensordot(cBxsh, uh[:,idx], axes=1) + numpy.tensordot(cBxah, udoth[:,idx], axes=1)
            cBxTM[:,idx] = numpy.tensordot(cBxse, ue[:,idx], axes=1) + numpy.tensordot(cBxae, udote[:,idx], axes=1)
            cByTE[:,idx] = numpy.tensordot(cBysh, uh[:,idx], axes=1) + numpy.tensordot(cByah, udoth[:,idx], axes=1)
            cByTM[:,idx] = numpy.tensordot(cByse, ue[:,idx], axes=1) + numpy.tensordot(cByae, udote[:,idx], axes=1)
            cBzTE[:,idx] = numpy.tensordot(cBzsh, uh[:,idx], axes=1) + numpy.tensordot(cBzah, udoth[:,idx], axes=1)
            cBzTM[:,idx] = numpy.tensordot(cBzse, ue[:,idx], axes=1) + numpy.tensordot(cBzae, udote[:,idx], axes=1)
        return (ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM)
def fields(self, x=None, y=None):
ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM = self.eval(x, y)
Ex = ExTE + ExTM
Ey = EyTE + EyTM
Ez = EzTE + EzTM
cBx = cBxTE + cBxTM
cBy = cByTE + cByTM
cBz = cBzTE + cBzTM
return (Ex, Ey, Ez, cBx, cBy, cBz)
def intensity(self, x=None, y=None):
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = .5 * (Ex * numpy.conj(cBy) - Ey * numpy.conj(cBx))
return cSz
def TEfrac_old(self, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz, cSz = self.fields(x, y)
cSTE = .5 * EMpy.utils.trapz2(Ex * numpy.conj(cBy), y, x)
cSTM = .5 * EMpy.utils.trapz2(-Ey * numpy.conj(cBx), y, x)
return numpy.abs(cSTE) / (numpy.abs(cSTE) + numpy.abs(cSTM))
def TEfrac(self):
Sx, Sy = self.__overlap(self)
return Sx / (Sx - Sy)
def overlap_old(self, m, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = self.intensity(x, y)
norm = scipy.sqrt(EMpy.utils.trapz2(cSz, y, x))
Ex1, Ey1, Ez1, cBx1, cBy1, cBz1 = m.fields(x, y)
cSz1 = m.intensity(x, y)
norm1 = scipy.sqrt(EMpy.utils.trapz2(cSz1, y, x))
return .5 * EMpy.utils.trapz2(Ex/norm * numpy.conj(cBy1/norm1) - Ey/norm * numpy.conj(cBx1/norm1), y, x)
    def __overlap_old(self, mode):
        """Overlap integrals (Sx, Sy) with *mode*, computed analytically in
        the FMM basis (superseded by __overlap, which caches the per-slice
        quantities instead of recomputing them in the inner loop).
        """
        nmodi = len(self.modie)
        k0 = 2. * numpy.pi / self.slicesx[0].wl
        kz = self.keff
        Sx = 0j
        Sy = 0j
        for mx, slice in enumerate(self.slicesx):
            for n1 in range(nmodi):
                # 1d y-profiles of this mode's n1-th expansion term in the
                # slice; dot/sueps are module helpers (derivative / division
                # by epsilon, presumably -- defined elsewhere in this file).
                phi_n1 = slice.modih[n1]
                phidot_n1 = dot(phi_n1)
                psi_n1 = slice.modie[n1]
                psisueps_n1 = sueps(psi_n1)
                psidotsueps_n1 = sueps(dot(psi_n1))
                uh_n1 = copy.deepcopy(self.modih[n1])
                # reduce to a single slice
                kfh_n1 = uh_n1.k[mx]
                uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
                uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
                uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
                uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
                uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
                uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
                uhdot_n1 = dot(uh_n1)
                ue_n1 = copy.deepcopy(self.modie[n1])
                # reduce to a single slice
                kfe_n1 = ue_n1.k[mx]
                ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
                ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
                ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
                ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
                ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
                ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
                uedot_n1 = dot(ue_n1)
                for n2 in range(nmodi):
                    # Same quantities for the n2-th term of the other mode.
                    phi_n2 = mode.slicesx[mx].modih[n2]
                    phidot_n2 = dot(phi_n2)
                    psi_n2 = mode.slicesx[mx].modie[n2]
                    psisueps_n2 = sueps(psi_n2)
                    psidotsueps_n2 = sueps(dot(psi_n2))
                    uh_n2 = copy.deepcopy(mode.modih[n2])
                    # reduce to a single slice
                    kfh_n2 = uh_n2.k[mx]
                    uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
                    uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
                    uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
                    uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
                    uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
                    uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
                    uhdot_n2 = dot(uh_n2)
                    ue_n2 = copy.deepcopy(mode.modie[n2])
                    # reduce to a single slice
                    kfe_n2 = ue_n2.k[mx]
                    ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
                    ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
                    ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
                    ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
                    ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
                    ue_n2.U = numpy.atleast_1d(ue_n2.U[mx:mx+2])
                    uedot_n2 = dot(ue_n2)
                    # Accumulate the x/y overlap contributions; scalarprod is
                    # the 1d-mode inner product defined elsewhere in this file.
                    Sx += kz * kfh_n2**2 / k0**3 * scalarprod(uh_n1, uh_n2) * scalarprod(phi_n1, phi_n2) \
                          - kfh_n2**2 / k0**4 * scalarprod(uedot_n1, uh_n2) * scalarprod(psidotsueps_n1, phi_n2)
                    Sy += kfe_n1**2 * kz / k0**3 * scalarprod(ue_n1, ue_n2) * scalarprod(psisueps_n1, psi_n2) \
                          + kfe_n1**2 / k0**4 * scalarprod(ue_n1, uhdot_n2) * scalarprod(psisueps_n1, phidot_n2)
        return (Sx, Sy)
def __overlap(self, mode):
    """Accumulate the two overlap sums (Sx, Sy) between self and mode.

    Cached variant of the overlap computation: for every x-slice the
    per-mode 1d quantities (slice modes, their derivatives, and the
    single-slice reductions of the 2d coefficient modes) are built once
    and reused in the double loop over mode indices.  ``Sx - Sy`` is the
    overlap integral (see ``overlap``).

    BUG FIX: the original appended to the undefined name ``uedot_n2``
    (instead of ``uedot_n2s``), raising NameError on the first call.
    """
    nmodi = len(self.modie)
    k0 = 2. * numpy.pi / self.slicesx[0].wl
    kz = self.keff
    Sx = 0j
    Sy = 0j
    for mx, slicex in enumerate(self.slicesx):
        # per-slice caches, filled once per mode index below
        phi_n1s = []
        phidot_n1s = []
        psi_n1s = []
        psisueps_n1s = []
        psidotsueps_n1s = []
        uh_n1s = []
        uhdot_n1s = []
        ue_n1s = []
        uedot_n1s = []
        kfe_n1s = []
        kfh_n1s = []
        phi_n2s = []
        phidot_n2s = []
        psi_n2s = []
        psisueps_n2s = []
        psidotsueps_n2s = []
        uh_n2s = []
        uhdot_n2s = []
        ue_n2s = []
        uedot_n2s = []
        kfe_n2s = []
        kfh_n2s = []
        for n1 in range(nmodi):
            phi_n1 = slicex.modih[n1]
            phi_n1s.append(phi_n1)
            phidot_n1s.append(dot(phi_n1))
            psi_n1 = slicex.modie[n1]
            psi_n1s.append(psi_n1)
            psisueps_n1s.append(sueps(psi_n1))
            psidotsueps_n1s.append(sueps(dot(psi_n1)))
            uh_n1 = copy.deepcopy(self.modih[n1])
            # reduce to a single slice
            kfh_n1 = uh_n1.k[mx]
            kfh_n1s.append(kfh_n1)
            uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
            uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
            uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
            uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
            uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
            uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
            uh_n1s.append(uh_n1)
            uhdot_n1s.append(dot(uh_n1))
            ue_n1 = copy.deepcopy(self.modie[n1])
            # reduce to a single slice
            kfe_n1 = ue_n1.k[mx]
            kfe_n1s.append(kfe_n1)
            ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
            ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
            ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
            ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
            ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
            ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
            ue_n1s.append(ue_n1)
            uedot_n1s.append(dot(ue_n1))
            phi_n2 = mode.slicesx[mx].modih[n1]
            phi_n2s.append(phi_n2)
            phidot_n2s.append(dot(phi_n2))
            psi_n2 = mode.slicesx[mx].modie[n1]
            psi_n2s.append(psi_n2)
            psisueps_n2s.append(sueps(psi_n2))
            psidotsueps_n2s.append(sueps(dot(psi_n2)))
            uh_n2 = copy.deepcopy(mode.modih[n1])
            # reduce to a single slice
            kfh_n2 = uh_n2.k[mx]
            kfh_n2s.append(kfh_n2)
            uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
            uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
            uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
            uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
            uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
            uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
            uh_n2s.append(uh_n2)
            uhdot_n2s.append(dot(uh_n2))
            ue_n2 = copy.deepcopy(mode.modie[n1])
            # reduce to a single slice
            kfe_n2 = ue_n2.k[mx]
            kfe_n2s.append(kfe_n2)
            ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
            ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
            ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
            ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
            ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
            ue_n2.U = numpy.atleast_1d(ue_n2.U[mx:mx+2])
            ue_n2s.append(ue_n2)
            # BUG FIX: was 'uedot_n2.append(...)' (undefined name).
            # NOTE(review): this cache is not read in the loop below;
            # kept for symmetry in case dot() has relied-upon effects.
            uedot_n2s.append(dot(ue_n2))
        for n1 in range(nmodi):
            uh_n1 = uh_n1s[n1]
            ue_n1 = ue_n1s[n1]
            uedot_n1 = uedot_n1s[n1]
            phi_n1 = phi_n1s[n1]
            psi_n1 = psi_n1s[n1]
            psidotsueps_n1 = psidotsueps_n1s[n1]
            psisueps_n1 = psisueps_n1s[n1]
            kfe_n1 = kfe_n1s[n1]
            for n2 in range(nmodi):
                uh_n2 = uh_n2s[n2]
                uhdot_n2 = uhdot_n2s[n2]
                ue_n2 = ue_n2s[n2]
                phi_n2 = phi_n2s[n2]
                phidot_n2 = phidot_n2s[n2]
                psi_n2 = psi_n2s[n2]
                kfh_n2 = kfh_n2s[n2]
                Sx += kz * kfh_n2**2 / k0**3 * scalarprod(uh_n1, uh_n2) * scalarprod(phi_n1, phi_n2) \
                    - kfh_n2**2 / k0**4 * scalarprod(uedot_n1, uh_n2) * scalarprod(psidotsueps_n1, phi_n2)
                Sy += kfe_n1**2 * kz / k0**3 * scalarprod(ue_n1, ue_n2) * scalarprod(psisueps_n1, psi_n2) \
                    + kfe_n1**2 / k0**4 * scalarprod(ue_n1, uhdot_n2) * scalarprod(psisueps_n1, phidot_n2)
    return (Sx, Sy)
def overlap(self, mode):
    """Overlap integral between this mode and ``mode`` (Sx - Sy)."""
    sum_x, sum_y = self.__overlap(mode)
    return sum_x - sum_y
def norm(self):
    """Return the normalization constant sqrt(overlap(self, self)).

    Uses ``numpy.sqrt`` instead of the original ``scipy.sqrt``: the
    SciPy top-level NumPy aliases were deprecated and later removed,
    and ``scipy.sqrt`` was simply ``numpy.sqrt`` re-exported, so the
    result is unchanged (the overlap is complex, which numpy.sqrt
    handles directly).
    """
    return numpy.sqrt(self.overlap(self))
def normalize(self):
    """Rescale every 1d mode in place so that the mode's norm becomes 1."""
    scale = self.norm()
    for mode_e, mode_h in zip(self.modie, self.modih):
        # divide all four coefficient arrays of both polarizations
        for u in (mode_e, mode_h):
            u.sl /= scale
            u.al /= scale
            u.sr /= scale
            u.ar /= scale
def get_fields_for_FDTD(self, x, y):
    """Get mode's field on a staggered (Yee-like) grid.

    Note: ignores some fields on the boundaries.  The magnetic
    components are obtained from cB by dividing by -120*pi (free-space
    impedance; the sign flips the direction).

    BUG FIX: the original interpolated the undefined names ``Hy`` and
    ``Hz``; the inputs are the ``cBy``/``cBz`` fields returned by
    ``self.fields``.
    """
    x0 = self.get_x()
    y0 = self.get_y()
    Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x0, y0)
    # Ex: ignores y = 0, max
    x_Ex_FDTD = EMpy.utils.centered1d(x)
    y_Ex_FDTD = y[1:-1]
    Ex_FDTD = EMpy.utils.interp2(x_Ex_FDTD, y_Ex_FDTD, x0, y0, Ex)
    # Ey: ignores x = 0, max
    x_Ey_FDTD = x[1:-1]
    y_Ey_FDTD = EMpy.utils.centered1d(y)
    Ey_FDTD = EMpy.utils.interp2(x_Ey_FDTD, y_Ey_FDTD, x0, y0, Ey)
    # Ez: ignores x, y = 0, max
    x_Ez_FDTD = x[1:-1]
    y_Ez_FDTD = y[1:-1]
    Ez_FDTD = EMpy.utils.interp2(x_Ez_FDTD, y_Ez_FDTD, x0, y0, Ez)
    # Hx: ignores x = 0, max, /120pi, reverse direction
    x_Hx_FDTD = x[1:-1]
    y_Hx_FDTD = EMpy.utils.centered1d(y)
    Hx_FDTD = EMpy.utils.interp2(x_Hx_FDTD, y_Hx_FDTD, x0, y0, cBx) / (-120. * numpy.pi)  # OKKIO!
    # Hy: ignores y = 0, max, /120pi, reverse direction
    x_Hy_FDTD = EMpy.utils.centered1d(x)
    y_Hy_FDTD = y[1:-1]
    # BUG FIX: was interp2(..., Hy) with Hy undefined
    Hy_FDTD = EMpy.utils.interp2(x_Hy_FDTD, y_Hy_FDTD, x0, y0, cBy) / (-120. * numpy.pi)
    # Hz: /120pi, reverse direction
    x_Hz_FDTD = EMpy.utils.centered1d(x)
    y_Hz_FDTD = EMpy.utils.centered1d(y)
    # BUG FIX: was interp2(..., Hz) with Hz undefined
    Hz_FDTD = EMpy.utils.interp2(x_Hz_FDTD, y_Hz_FDTD, x0, y0, cBz) / (-120. * numpy.pi)
    return (Ex_FDTD, Ey_FDTD, Ez_FDTD, Hx_FDTD, Hy_FDTD, Hz_FDTD)
def plot(self, x_=None, y_=None):
    """Contour-plot |Ex..cBz| (2x3 grid) and the power density |cSz|."""
    x = self.get_x() if x_ is None else numpy.atleast_1d(x_)
    y = self.get_y() if y_ is None else numpy.atleast_1d(y_)
    field_set = self.fields(x, y)
    # fields
    pylab.figure()
    titles = ['Ex', 'Ey', 'Ez', 'cBx', 'cBy', 'cBz']
    for i, title in enumerate(titles):
        pylab.subplot(231 + i)
        pylab.contour(x, y, numpy.abs(field_set[i]))
        pylab.xlabel('x')
        pylab.ylabel('y')
        pylab.title(title)
        pylab.axis('image')
    pylab.show()
    # power
    pylab.figure()
    pylab.contour(x, y, numpy.abs(field_set[-1]))
    pylab.xlabel('x')
    pylab.ylabel('y')
    pylab.title('cSz')
    pylab.axis('image')
    pylab.show()
def __str__(self):
    """Report the effective index neff = keff / k0 (k0 = 2*pi/wl)."""
    neff = self.keff / (2 * numpy.pi / self.slicesx[0].wl)
    return 'neff = %s' % neff
class FMM(ModeSolver):
    """Common base class for the Film Mode Matching solvers (1d and 2d)."""
    pass
class FMM1d(FMM):
    """Driver to simulate 1d multilayer structures.

    Examples
    ========
    Find the first 3 TE modes of two slabs of refractive indices 1 and 3,
    of thickness 1um each, for wl = 1, with symmetric boundary conditions:

    >>> import numpy
    >>> import FMM
    >>> Uy = numpy.array([0., 1., 2.])
    >>> ny = numpy.array([1., 3.])
    >>> wl = 1.
    >>> nmodi = 3
    >>> simul = FMM.FMM1d(Uy, ny, 'SS').solve(wl, nmodi, 'TE')
    >>> keff_0_expected = 18.790809413149393
    >>> keff_1_expected = 18.314611633384185
    >>> keff_2_expected = 17.326387847565034
    >>> assert(numpy.allclose(simul.modes[0].keff, keff_0_expected))
    >>> assert(numpy.allclose(simul.modes[1].keff, keff_1_expected))
    >>> assert(numpy.allclose(simul.modes[2].keff, keff_2_expected))
    """

    def __init__(self, Uy, ny, boundary):
        """Store region boundaries, refractive indices and boundary conditions."""
        self.Uy, self.ny, self.boundary = Uy, ny, boundary

    def solve(self, wl, nmodes, polarization, verbosity=0):
        """Compute the first nmodes modes at wavelength wl for the given polarization."""
        Message('Solving 1d modes.', 1).show(verbosity)
        self.wl, self.nmodes, self.polarization = wl, nmodes, polarization
        self.modes = FMM1d_y(self.Uy, self.ny, self.wl, self.nmodes,
                             self.boundary, self.polarization, verbosity)
        return self
class FMM2d(FMM):
    """Driver to simulate 2d cross-sections by film mode matching.

    Examples
    ========
    Find the first 2 modes of a lossy Si channel waveguide in SiO2, using
    only 3 1dmodes and with electric and magnetic bc on x and y, respectively:

    >>> import numpy
    >>> import FMM
    >>> wl = 1.55
    >>> nmodislices = 3
    >>> nmodi2d = 2
    >>> Ux = numpy.array([0, 2, 2.4, 4.4])
    >>> Uy = numpy.array([0, 2, 2.22, 4.22])
    >>> boundary = Boundary(xleft='Electric Wall',
                            yleft='Magnetic Wall',
                            xright='Electric Wall',
                            yright='Magnetic Wall')
    >>> n2 = 1.446
    >>> n1 = 3.4757 - 1e-4j
    >>> refindex = numpy.array([[n2, n2, n2],
                                [n2, n1, n2],
                                [n2, n2, n2]])
    >>> simul = FMM.FMM2d(Ux, Uy, refindex, boundary).solve(wl, nmodislices, nmodi2d)
    >>> keff0_expected = 9.666663697969399e+000 -4.028846755836984e-004j
    >>> keff1_expected = 7.210476803133368e+000 -2.605078086535284e-004j
    >>> assert(numpy.allclose(simul.modes[0].keff, keff0_expected))
    >>> assert(numpy.allclose(simul.modes[1].keff, keff1_expected))
    """

    def __init__(self, Ux, Uy, rix, boundary):
        """Store region coordinates, refractive indices and boundary conditions."""
        self.Ux, self.Uy = Ux, Uy
        self.rix = rix
        self.boundary = boundary

    def solve(self, wl, n1dmodes, nmodes, verbosity=0):
        """Find nmodes 2d modes at wl, using n1dmodes 1d modes in each slice."""
        Message('Solving 2d modes', 1).show(verbosity)
        self.wl = wl
        self.n1dmodes = n1dmodes
        self.nmodes = nmodes
        self.slices = script1d(self.Ux, self.Uy, self.rix, self.wl,
                               self.boundary, self.n1dmodes, verbosity)
        self.modes = FMM1d_x_component(self.slices, nmodes, verbosity)
        return self
def analyticalsolution(nmodi, TETM, FMMpars):
    """Closed-form 1d modes of a uniform interval split into Nregions.

    Builds the first ``nmodi`` sinusoidal eigenfunctions on [Uy[0], Uy[-1]]
    for the boundary-condition pair in FMMpars['boundary'] ('A' = antisymmetric,
    'S' = symmetric at left/right), sampled as left/right values (sl, sr) and
    derivatives (al, ar) per region, and returns them as FMMMode1dy objects.

    Args:
        nmodi: number of modes to build.
        TETM: 'TE' or 'TM'; selects the normalization constant
            (TM scales by epsilon[0] -- presumably a uniform stack; verify).
        FMMpars: dict with 'beta', 'epsilon', 'Uy', 'boundary'.
    """
    betay = FMMpars['beta']
    epsilon = FMMpars['epsilon']
    Uy = FMMpars['Uy']
    by = FMMpars['boundary']
    Nregions = len(epsilon)
    sl = numpy.zeros((nmodi,Nregions), dtype=complex)
    sr = numpy.zeros_like(sl)
    al = numpy.zeros_like(sl)
    ar = numpy.zeros_like(sl)
    # interval
    D = Uy[-1] - Uy[0]
    if TETM == 'TE':
        N = numpy.sqrt(2. / D)
    else:
        N = numpy.sqrt(2. / D * epsilon[0])
    # boundary condition: pick the transverse wavenumbers kn and the
    # sine/cosine family matching the left/right symmetry
    if by == 'AA':
        kn = (numpy.pi * numpy.arange(1, nmodi + 1) / D)
        kn = kn[:, numpy.newaxis]
        sl = numpy.sin(kn * (Uy[:-1] - Uy[0]))
        sr = numpy.sin(kn * (Uy[1:] - Uy[0]))
        al = numpy.cos(kn * (Uy[:-1] - Uy[0]))
        ar = numpy.cos(kn * (Uy[1:] - Uy[0]))
        # enforce exact zeros at the walls
        sr[:, -1] = 0.
        sl[:, 0] = 0.
    elif by == 'AS':
        kn = (numpy.pi * (numpy.arange(0, nmodi) + .5) / D)
        kn = kn[:, numpy.newaxis]
        sl = numpy.sin(kn * (Uy[:-1] - Uy[0]))
        sr = numpy.sin(kn * (Uy[1:] - Uy[0]))
        al = numpy.cos(kn * (Uy[:-1] - Uy[0]))
        ar = numpy.cos(kn * (Uy[1:] - Uy[0]))
        ar[:, -1] = 0.
        sl[:, 0] = 0.
    elif by == 'SA':
        kn = (numpy.pi * (numpy.arange(0, nmodi) + .5) / D)
        kn = kn[:, numpy.newaxis]
        sl = numpy.cos(kn * (Uy[:-1] - Uy[0]))
        sr = numpy.cos(kn * (Uy[1:] - Uy[0]))
        al = -numpy.sin(kn * (Uy[:-1] - Uy[0]))
        ar = -numpy.sin(kn * (Uy[1:] - Uy[0]))
        sr[:, -1] = 0.
        al[:, 0] = 0.
    elif by == 'SS':
        kn = (numpy.pi * numpy.arange(0, nmodi) / D)
        kn = kn[:, numpy.newaxis]
        sl = numpy.cos(kn * (Uy[:-1] - Uy[0]))
        sr = numpy.cos(kn * (Uy[1:] - Uy[0]))
        al = -numpy.sin(kn * (Uy[:-1] - Uy[0]))
        ar = -numpy.sin(kn * (Uy[1:] - Uy[0]))
        ar[:, -1] = 0.
        al[:, 0] = 0.
    # normalization
    sl *= N
    sr *= N
    for n in range(0, nmodi):
        al[n,:] *= N * kn[n]
        ar[n,:] *= N * kn[n]
    # special case: if k == 0 the eigenfunction is constant and its
    # normalization differs; this happens only with 'SS' boundaries
    # and only for the first mode
    if by == 'SS':
        sqrt2 = numpy.sqrt(2.)
        sl[0,:] /= sqrt2
        sr[0,:] /= sqrt2
        al[0,:] /= sqrt2
        ar[0,:] /= sqrt2
    modi = []
    for mk in range(0, nmodi):
        modo = FMMMode1dy()
        modo.sl = sl[mk,:].astype(complex)
        modo.sr = sr[mk,:].astype(complex)
        modo.al = al[mk,:].astype(complex)
        modo.ar = ar[mk,:].astype(complex)
        modo.k = kn[mk] * numpy.ones(Nregions)
        modo.U = Uy
        modo.keff = scipy.sqrt(betay[0]**2 - kn[mk]**2)
        modo.zero = 0.
        modo.pars = FMMpars
        modi.append(modo)
    return modi
def sinxsux(x):
    """Return sin(x)/x, with the removable singularity sinxsux(0) == 1."""
    # numpy.sinc(t) computes sin(pi*t)/(pi*t); substituting t = x/pi
    # turns it into sin(x)/x.
    scaled = x / numpy.pi
    return numpy.sinc(scaled)
def FMMshootingTM(kz_, FMMpars):
    """Shooting method for TM 1d modes.

    For each trial longitudinal wavenumber in ``kz_``, propagates the field
    coefficients from both boundaries towards the interface next to the
    region with the largest Re(betay) and records the mismatch ``Delta``;
    a zero of Delta identifies a mode.  The TM continuity conditions carry
    the epsilon ratio on the derivative coefficients.

    Args:
        kz_: scalar or 1d array of trial kz values.
        FMMpars: dict with 'beta', 'epsilon', 'Uy' and 'boundary'.

    Returns:
        (Delta, modo): mismatch per trial kz and -- only when a single kz
        was given -- the matched FMMMode1dy; otherwise modo is left mostly
        unpopulated.
    """
    betay = FMMpars['beta']
    eps = FMMpars['epsilon']
    Uy = FMMpars['Uy']
    by = FMMpars['boundary']
    kz = numpy.atleast_1d(kz_)
    Nregions = len(betay)
    d = numpy.diff(Uy)
    Delta = numpy.zeros_like(kz)
    sl = numpy.zeros(Nregions, dtype=complex)
    sr = numpy.zeros_like(sl)
    al = numpy.zeros_like(sl)
    ar = numpy.zeros_like(sl)
    # transverse wavenumber per (trial kz, region) and per-layer phase terms
    k_ = scipy.sqrt(betay**2 - kz[:,numpy.newaxis]**2)
    kd = k_[:,numpy.newaxis] * d
    sinkdsuk_ = sinxsux(kd) * d
    coskd_ = numpy.cos(kd)
    sinkdk_ = numpy.sin(kd) * k_[:,numpy.newaxis]
    # left boundary condition
    if by[0] == 'A':
        al[0] = 1
    elif by[0] == 'S':
        sl[0] = 1
    else:
        raise ValueError('unrecognized left boundary condition')
    # right boundary condition
    if by[1] == 'A':
        ar[-1] = 1
    elif by[1] == 'S':
        sr[-1] = 1
    else:
        raise ValueError('unrecognized right boundary condition')
    # loop over layers: the matching interface sits next to the region
    # with the highest real propagation constant
    maxbetay = numpy.max(numpy.real(betay))
    n1 = numpy.argmax(numpy.real(betay)) + 1
    if n1 == Nregions:
        n1 = Nregions - 1
    n2 = n1 + 1
    modo = FMMMode1dy()
    for m in range(0, len(kz)):
        k = k_[m,:]
        sinkdsuk = sinkdsuk_[m,:][0]
        coskd = coskd_[m,:][0]
        sinkdk = sinkdk_[m,:][0]
        # forward sweep from the left boundary up to region n1
        for idx in range(0, n1):
            sr[idx] = sl[idx] * coskd[idx] + al[idx] * sinkdsuk[idx]
            ar[idx] = al[idx] * coskd[idx] - sl[idx] * sinkdk[idx]
            #******************* requirement of continuity (TM: scale by eps)
            if idx < n1 - 1:
                sl[idx+1] = sr[idx];
                al[idx+1] = ar[idx] / eps[idx] * eps[idx + 1];
            #*******************
        # backward sweep from the right boundary down to region n2
        for idx1 in range(Nregions - 1, n2 - 2, -1):
            sl[idx1] = sr[idx1] * coskd[idx1] - ar[idx1] * sinkdsuk[idx1]
            al[idx1] = ar[idx1] * coskd[idx1] + sr[idx1] * sinkdk[idx1]
            #******************* requirement of continuity (TM: scale by eps)
            if idx1 > n2:
                sr[idx1 - 1] = sl[idx1]
                ar[idx1 - 1] = al[idx1] / eps[idx1] * eps[idx1 - 1]
            #*******************
        # mismatch of the two sweeps at the matching interface
        Delta[m] = (eps[n1-1] * sr[n1-1] * al[n2-1] - eps[n2-1] * ar[n1-1] * sl[n2-1])
    if len(kz) < 2:
        # normalize and save only if len(kz) == 1
        # otherwise, modo is ignored and only Delta is useful
        # match the left- and right-propagated solutions
        alfa = sr[n1-1] / sl[n2-1]
        sl[n2-1:] *= alfa
        sr[n2-1:] *= alfa
        al[n2-1:] *= alfa
        ar[n2-1:] *= alfa
        modo.sl = sl
        modo.sr = sr
        modo.al = al
        modo.ar = ar
        modo.k = k
        modo.U = Uy
        modo.keff = kz
        modo.zero = Delta
        modo.pars = FMMpars
    return (Delta, modo)
def FMMshooting(kz_, FMMpars):
    """Shooting method for TE 1d modes.

    For each trial longitudinal wavenumber in ``kz_``, propagates the field
    coefficients from both boundaries towards the interface next to the
    region with the largest Re(betay) and records the mismatch ``Delta``;
    a zero of Delta identifies a mode.  TE continuity carries the
    coefficients across interfaces unchanged (no epsilon scaling).

    Args:
        kz_: scalar or 1d array of trial kz values.
        FMMpars: dict with 'beta', 'Uy' and 'boundary'.

    Returns:
        (Delta, modo): mismatch per trial kz and -- only when a single kz
        was given -- the matched FMMMode1dy; otherwise modo is left mostly
        unpopulated.

    CLEANUP: removed a long commented-out scipy.weave C implementation;
    scipy.weave no longer exists in SciPy.
    """
    betay = FMMpars['beta']
    Uy = FMMpars['Uy']
    by = FMMpars['boundary']
    kz = numpy.atleast_1d(kz_)
    Nregions = len(betay)
    d = numpy.diff(Uy)
    Delta = numpy.zeros_like(kz)
    sl = numpy.zeros(Nregions, dtype=complex)
    sr = numpy.zeros_like(sl)
    al = numpy.zeros_like(sl)
    ar = numpy.zeros_like(sl)
    # transverse wavenumber per (trial kz, region) and per-layer phase terms
    k_ = scipy.sqrt(betay**2 - kz[:,numpy.newaxis]**2)
    kd = k_[:,numpy.newaxis] * d
    sinkdsuk_ = sinxsux(kd) * d
    coskd_ = numpy.cos(kd)
    sinkdk_ = numpy.sin(kd) * k_[:,numpy.newaxis]
    # left boundary condition
    if by[0] == 'A':
        al[0] = 1
    elif by[0] == 'S':
        sl[0] = 1
    else:
        raise ValueError('unrecognized left boundary condition')
    # right boundary condition
    if by[1] == 'A':
        ar[-1] = 1
    elif by[1] == 'S':
        sr[-1] = 1
    else:
        raise ValueError('unrecognized right boundary condition')
    # loop over layers: the matching interface sits next to the region
    # with the highest real propagation constant
    maxbetay = numpy.max(numpy.real(betay))
    n1 = numpy.argmax(numpy.real(betay)) + 1
    if n1 == Nregions:
        n1 = Nregions - 1
    n2 = n1 + 1
    modo = FMMMode1dy()
    for m in range(0, len(kz)):
        k = k_[m,:]
        sinkdsuk = sinkdsuk_[m,:][0]
        coskd = coskd_[m,:][0]
        sinkdk = sinkdk_[m,:][0]
        # forward sweep from the left boundary up to region n1
        for idx in range(0, n1):
            sr[idx] = sl[idx] * coskd[idx] + al[idx] * sinkdsuk[idx]
            ar[idx] = al[idx] * coskd[idx] - sl[idx] * sinkdk[idx]
            # requirement of continuity at the interface
            if idx < n1 - 1:
                sl[idx + 1] = sr[idx]
                al[idx + 1] = ar[idx]
        # backward sweep from the right boundary down to region n2
        for idx1 in range(Nregions - 1, n2 - 2, -1):
            sl[idx1] = sr[idx1] * coskd[idx1] - ar[idx1] * sinkdsuk[idx1]
            al[idx1] = ar[idx1] * coskd[idx1] + sr[idx1] * sinkdk[idx1]
            # requirement of continuity at the interface
            if idx1 > n2:
                sr[idx1 - 1] = sl[idx1]
                ar[idx1 - 1] = al[idx1]
        # mismatch of the two sweeps at the matching interface
        Delta[m] = (sr[n1-1] * al[n2-1] - ar[n1-1] * sl[n2-1])
    if len(kz) < 2:
        # normalize and save only if len(kz) == 1
        # otherwise, modo is ignored and only Delta is useful
        # match the left- and right-propagated solutions
        alfa = sr[n1-1] / sl[n2-1]
        sl[n2-1:] *= alfa
        sr[n2-1:] *= alfa
        al[n2-1:] *= alfa
        ar[n2-1:] *= alfa
        modo.sl = sl
        modo.sr = sr
        modo.al = al
        modo.ar = ar
        modo.k = k
        modo.U = Uy
        modo.keff = kz
        modo.zero = Delta
        modo.pars = FMMpars
    return (Delta, modo)
def remove_consecutives(x, y):
    """Collapse each run of consecutive integers in x to one representative.

    Within a run, the representative is the position with the largest y
    value (y must be indexable by the values stored in x).  Returns the
    list of representatives, one per run.
    """
    # run_continues[i] == 1 when x[i+1] immediately follows x[i]
    run_continues = numpy.r_[numpy.diff(x) == 1, 0].astype(int)
    runs = []
    current = None
    for pos in range(len(run_continues)):
        if current is None:
            current = [x[pos]]
        else:
            current.append(x[pos])
        if run_continues[pos] != 1:
            runs.append(current)
            current = None
    index = []
    for run in runs:
        offset = numpy.argmax(y[run])
        index.append(run[0] + offset)
    return index
def findzerosnew(x, y, searchinterval):
minsi = 2 * numpy.abs(x[1] - x[0])
if searchinterval < minsi:
searchinterval = minsi
dy = numpy.r_[0, numpy.diff(numpy.diff(scipy.log(y))), 0]
idy = numpy.where(dy > 0.005)[0]
if len(idy) == 0:
zeri = numpy.array([])
z1 = numpy.array([])
z2 = numpy.array([])
else:
ind = remove_consecutives(idy, dy)
zeri = x[ind]
z1 = numpy.zeros_like(zeri)
z2 = numpy.zeros_like(zeri)
dz = numpy.abs(numpy.diff(zeri))
if len(dz) == 0:
z1[0] = zeri - searchinterval/2
z2[0] = zeri + searchinterval/2
else:
delta = numpy.min([dz[0], searchinterval])
z1[0] = zeri[0] - delta/2
z2[0] = zeri[0] + delta/2
for idx in range(1, len(zeri) - 1):
delta = numpy.min([dz[idx - 1], dz[idx], searchinterval])
z1[idx] = zeri[idx] - delta/2
z2[idx] = zeri[idx] + delta/2
delta = | numpy.min([dz[-1], searchinterval]) | numpy.min |
import numpy as np
from scipy.spatial.distance import squareform
from random import randint
# there are more efficient algorithms for this
# https://people.csail.mit.edu/virgi/6.890/papers/APBP.pdf
def max_min(A, B):
    '''Max-min product of two square matrices: out[i, k] = max_j min(A[i, j], B[j, k]).

    params:
        A, B: NxN numpy arrays '''
    assert A.shape == B.shape
    # broadcast to an N x N x N cube of pairwise minima, then reduce over j
    pairwise_min = np.minimum(A[:, :, None], B[None, :, :])
    return pairwise_min.max(axis=1)
def mat_gromov_prod(dists, base):
    '''Gromov products of an N-point metric space relative to a base point.

    Args:
        dists (ndarray): NxN matrix of pairwise distances
        base (int): index of the basepoint in 0...N-1 '''
    assert dists.shape[0] == dists.shape[1] and 0 <= base < dists.shape[0]
    from_base = dists[base, :][None, :]  # d(base, j) as a row
    to_base = dists[:, base][:, None]    # d(i, base) as a column
    return 0.5 * (from_base + to_base - dists)
def delta_rel(dists, base=None):
    ''' Measure the delta-hyperbolicity constant of data
    with respect to basepoint, normalized by the diameter (max dist).

    Args:
        dists (ndarray): NxN matrix of pairwise distances
        base (int): index of basepoint in 0...N-1 (default = random)
    '''
    if base is None:
        # BUG FIX: the original read 'dist.shape' (undefined name),
        # raising NameError whenever base was omitted.
        base = randint(0, dists.shape[0] - 1)
    assert is_metric(dists) and 0 <= base < dists.shape[0]
    G = mat_gromov_prod(dists, base)
    # four-point condition: delta = max over entries of (G maxmin G) - G
    delta = np.max(max_min(G, G) - G)
    diam = np.max(dists)
    return delta / diam
def delta_sample(X, **kwargs):
    """Estimate relative delta-hyperbolicity on random subsamples of X.

    Keyword Args:
        bs: subsample size (default: all rows of X).
        tries: number of subsamples to draw (default: 10).
        dist: optional callable dist(batch, batch) -> pairwise distance
            matrix; defaults to Euclidean distances between rows.

    Returns:
        list of per-subsample delta_rel values.

    Note: np.random.choice samples WITH replacement, so a batch may
    contain duplicate points.
    """
    bs = kwargs.get("bs", X.shape[0])
    tries = kwargs.get("tries", 10)
    dist = kwargs.get("dist", None)
    deltas = []
    for _ in range(tries):
        idx = np.random.choice(X.shape[0], bs)
        batch = X[idx]
        if dist is None:
            # BUG FIX: 'batch[None:,]' was a typo for 'batch[None, :]';
            # it evaluated to plain 'batch' and gave the right pairwise
            # matrix only by broadcasting accident.
            dists = np.linalg.norm(batch[None, :] - batch[:, None], axis=-1)
        else:
            dists = dist(batch, batch)
        deltas.append(delta_rel(dists, randint(0, bs - 1)))
    return deltas
def is_metric(X, tol=1e-8):
    """True when X looks like a distance matrix: 2d, symmetric (within tol),
    zero diagonal (within tol) and non-negative."""
    if len(X.shape) != 2:
        return False
    symmetric = np.all(np.abs(X - X.T) < tol)
    zero_diagonal = np.all(np.abs(np.diag(X)) < tol)
    nonnegative = np.all(X >= 0)
    return symmetric and zero_diagonal and nonnegative
def avg_distortion(metric1, metric2):
''' Average distortion between two metrics.
Args:
metric1, metric2 (ndarray): N x N distance matrices,
or length N*(N-1)//2 compressed distance matrices
Returns:
average distortion (float)
'''
assert metric1.shape == metric2.shape
if len(metric1.shape) > 1:
assert is_metric(metric1)
X = squareform(metric1)
else:
X = metric1
if len(metric2.shape) > 1:
assert is_metric(metric2)
Y = squareform(metric2)
else:
Y = metric2
return np.mean( | np.abs(X-Y) | numpy.abs |
from google.cloud import bigquery
from google.oauth2 import service_account
import pandas_gbq
import pandas as pd
from pandas.io import gbq
import numpy as np
from IPython import display
import tensorflow as tf
from tensorflow.python.data import Dataset
import math
from sklearn import metrics
from matplotlib import pyplot as plt
from sklearn.utils import shuffle
from googleapiclient import discovery
from googleapiclient import errors
import time
import re
import os
from google.cloud import storage
import datetime
import pytz
class Auto_predictor():
    """Orchestrates an AI Platform (ML Engine) batch prediction job fed from BigQuery.

    Workflow: ``prepare_input_data`` materializes a SQL query into a temp
    BigQuery table and dumps it to GCS as newline-delimited JSON;
    ``batch_predict`` then submits a batch prediction job reading from
    ``input_bucket`` and writing to ``output_bucket``.
    """
    def __init__(self, service_account_credential_file, project_id, dataset_id, model_name, bucket, sql, region = "us-east1", data_format = 'JSON', max_worker_count=8, version_name=None,runtime_version=None):
        """Build credentials, job id (model name + timestamp) and GCS paths.

        Args:
            service_account_credential_file: path to a service-account JSON key.
            bucket: base GCS bucket URI used to derive input/output paths.
            sql: query producing the prediction input rows.
        """
        timestamp = datetime.datetime.now(pytz.timezone("America/New_York")).strftime('%Y_%m_%d_%H_%M_%S')
        current_day = datetime.datetime.now(pytz.timezone('US/Eastern')).strftime('%Y-%m-%d')  # NOTE(review): unused
        last_hour = datetime.datetime.now(pytz.timezone('US/Eastern')).replace(microsecond=0,second=0,minute=0).strftime("%H:%M:%S")  # NOTE(review): unused
        self.credentials = service_account.Credentials.from_service_account_file(service_account_credential_file)
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.model_name = model_name
        self.bucket = bucket
        self.sql = sql
        self.region = region
        self.data_format = data_format
        self.version_name = version_name
        self.max_worker_count = max_worker_count
        self.runtime_version = runtime_version
        self.job_id = '{}_{}'.format(model_name,timestamp)
        self.input_bucket = '{}/prediction_input/{}'.format(bucket,model_name)
        self.output_bucket = '{}/prediction_output/{}'.format(bucket,model_name)
    def __repr__(self):
        """Human-readable summary of the configured model and paths."""
        return ('model_name:{}\ninput_bucket:{}\noutput_bucket:{}\nsql:{}'.format(self.model_name,self.input_bucket,self.output_bucket,self.sql))
    def prepare_input_data(self):
        # This function takes data from bigQuery, massages it or do any necessary feature engineering in a SQL query, and dumps the result into a GCS bucket
        # At the time of writing, the functionality of directly dumping bigQuery query result into a GCS bucket is not supported,
        # So I used a temp table for storing the query result before dumping it to the GCS bucket.
        client = bigquery.Client(credentials=self.credentials,project=self.project_id)
        job_config = bigquery.QueryJobConfig()
        table_ref = client.dataset(self.dataset_id).table("TEMP_TABLE")
        job_config.destination = table_ref
        # overwrite the temp table on every run
        job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
        query_job = client.query(self.sql,location="US",job_config=job_config,)
        query_job.result()
        print("Saved query result to {}".format(table_ref.path))
        job_config = bigquery.job.ExtractJobConfig()
        job_config.destination_format = bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON
        # NOTE(review): this writes to the bucket ROOT (self.bucket), while
        # batch_predict reads from self.input_bucket -- confirm the intended
        # destination; these look inconsistent.
        gcs_destination = '{}/*.{}'.format(self.bucket,self.data_format.lower())
        extract_job = client.extract_table(table_ref, gcs_destination, job_config = job_config, location='US')
        extract_job.result()
        print('Dumped {}:{}.{} to {}'.format(self.project_id, self.dataset_id, "TEMP_TABLE", self.bucket))
    def batch_predict(self):
        """Submit the batch prediction job to the ML Engine jobs API."""
        # Google Cloud has specific naming requirements for project and model id
        project_id = 'projects/{}'.format(self.project_id)
        model_id = 'projects/{}/models/{}'.format(self.project_id, self.model_name)
        body = {'jobId' : self.job_id,
                'predictionInput': {'dataFormat' : self.data_format,
                                    'inputPaths' : self.input_bucket,
                                    'outputPath' : self.output_bucket,
                                    'region' : self.region,
                                    'versionName': self.version_name,
                                    'modelName' : model_id}}
        if self.max_worker_count:
            body['predictionInput']['maxWorkerCount'] = self.max_worker_count
        if self.runtime_version:
            body['predictionInput']['runtimeVersion'] = self.runtime_version
        # Make batch prediction
        ml = discovery.build('ml', 'v1', credentials=self.credentials)
        request = ml.projects().jobs().create(parent=project_id, body=body)
        request.execute()
class Trainer():
def __init__(self, project_id, service_account_dir):
    """Remember the project and point GOOGLE_APPLICATION_CREDENTIALS at the key file."""
    self.project_id, self.service_account_dir = project_id, service_account_dir
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.service_account_dir
def forward_key_to_export(self,estimator):
    """Wrap a TF1 estimator so pass-through id fields flow from input to predictions.

    Uses tf.contrib.estimator.forward_features to copy the id columns into
    the prediction dict, then rebuilds the estimator with a model_fn that
    also registers each id field as a named export output.
    """
    estimator = tf.contrib.estimator.forward_features(estimator, self.passed_through_id_fields)
    config = estimator.config
    def model_fn2(features, labels, mode):
        # delegate to the wrapped estimator's model_fn
        estimatorSpec = estimator._call_model_fn(features, labels, mode, config=config)
        if estimatorSpec.export_outputs:
            # expose every pass-through field as an export signature key
            for ekey in self.passed_through_id_fields:
                estimatorSpec.export_outputs[ekey] = tf.estimator.export.PredictOutput(estimatorSpec.predictions)
        return estimatorSpec
    return tf.estimator.Estimator(model_fn=model_fn2, config=config)
def auto_serving_fn(self):
    """Build a ServingInputReceiver that keeps pass-through id columns intact.

    Derives one placeholder per training-feature column; id columns are
    declared float64 and routed through tf.identity so they survive into
    the exported graph unchanged.
    """
    def adding_passing_through_ids():
        fields = self.training_features.columns
        # columns acting as instance keys (matched case-insensitively)
        INSTANCE_KEY_COLUMN = [field for field in fields if field.lower() in self.passed_through_id_fields]
        INPUT_COLUMNS = set(tf.feature_column.numeric_column(field,dtype=tf.dtypes.float64) if field.lower() in self.passed_through_id_fields else tf.feature_column.numeric_column(field) for field in fields)
        inputs = {}
        features = {}
        for feat in INPUT_COLUMNS:
            inputs[feat.name] = tf.placeholder(shape=[None], dtype=feat.dtype)
            if feat.name in INSTANCE_KEY_COLUMN:
                # identity keeps the key tensor flowing through the graph
                features[feat.name] = tf.identity(inputs[feat.name])
            else:
                features[feat.name] = inputs[feat.name]
        serving_input_rcvr = tf.estimator.export.ServingInputReceiver(features, inputs)
        return serving_input_rcvr
    serving_fn = adding_passing_through_ids
    return serving_fn()
def construct_feature_columns(self, input_features):
    """Numeric tf feature columns for every column except the pass-through ids."""
    feature_frame = input_features.copy().drop(self.passed_through_id_fields, axis=1)
    return {tf.feature_column.numeric_column(column) for column in feature_frame}
def my_input_fn(self, features, targets, batch_size=1, shuffle=True, num_epochs=None, shuffle_num = 10000):
    """tf.data input fn: batch, repeat, then (optionally) shuffle.

    Note: shuffling happens after batching here, i.e. at batch granularity.
    Returns a (features, labels) tensor pair from a one-shot iterator.
    """
    feature_arrays = {name: np.array(column) for name, column in dict(features).items()}
    ds = Dataset.from_tensor_slices((feature_arrays, targets))
    ds = ds.batch(batch_size).repeat(num_epochs)
    if shuffle:
        ds = ds.shuffle(shuffle_num)
    return ds.make_one_shot_iterator().get_next()
def predict_training_input_fn(self):
    """Input fn for scoring the training set: single epoch, no shuffling.

    BUG FIX: ``my_input_fn`` is an instance method; the original
    unqualified call raised NameError at runtime.
    """
    return self.my_input_fn(self.training_features, self.training_targets[self.target_name], num_epochs=1, shuffle=False)
def predict_validation_input_fn(self):
    """Input fn for scoring the validation set: single epoch, no shuffling.

    BUG FIX: ``my_input_fn`` is an instance method; the original
    unqualified call raised NameError at runtime.
    """
    return self.my_input_fn(self.validation_features, self.validation_targets[self.target_name], num_epochs=1, shuffle=False)
def train(self, sql, steps, dropout_num, batch_size, hidden_units, target_name, gcs_bucket_name = "model_files", tmp_model_prefix = "temporary_models/auto_train", completed_model_prefix = "completed_models/auto_train", is_classification = True, learning_rate = 0.0003,decay_rate = 0.9,clip_gradients_by_norm = 5.0,passed_through_id_fields = []):
    """Load training data from BigQuery and fit a TF1 DNN estimator.

    Args:
        sql: BigQuery query producing the training table (features + target).
        steps: number of training steps.
        dropout_num: dropout probability for the DNN.
        hidden_units: list of layer widths.
        target_name: label column name.
        is_classification: DNNClassifier when True, DNNRegressor otherwise.
        passed_through_id_fields: id columns forwarded through predictions.
            NOTE(review): mutable default argument -- benign here (never
            mutated), but worth changing to None.

    Side effects: sets self.training_data/_features/_targets,
    self.completed_model_dir and self.estimator.
    """
    self.is_classification = is_classification
    self.training_data = gbq.read_gbq(sql, self.project_id, dialect='standard')
    self.target_name = target_name
    self.gcs_bucket_name = self.project_id + "_" + gcs_bucket_name
    self.tmp_model_prefix = tmp_model_prefix
    self.completed_model_prefix = completed_model_prefix
    # split the table into features vs. the single label column
    self.training_features = self.training_data.loc[:, self.training_data.columns != target_name]
    self.training_targets = self.training_data.loc[:, self.training_data.columns == target_name]
    # self.tmp_model_dir = "gs://{}/{}".format(gcs_bucket_name,tmp_model_prefix)
    self.completed_model_dir = "gs://{}/{}".format(self.gcs_bucket_name,completed_model_prefix)
    self.passed_through_id_fields = passed_through_id_fields
    # Create an estimator (RMSProp with gradient-norm clipping).
    optimizer = tf.contrib.estimator.clip_gradients_by_norm(tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=decay_rate),clip_gradients_by_norm)
    if is_classification:
        estimator = tf.estimator.DNNClassifier(
            feature_columns = self.construct_feature_columns(self.training_features),
            hidden_units = hidden_units,
            optimizer = optimizer,
            dropout = dropout_num
            #,
            #model_dir = self.tmp_model_dir
            )
    else:
        estimator = tf.estimator.DNNRegressor(
            feature_columns = self.construct_feature_columns(self.training_features),
            hidden_units = hidden_units,
            optimizer = optimizer,
            dropout = dropout_num
            #,
            #model_dir = self.tmp_model_dir
            )
    # forward the id columns into predictions/exports
    estimator = self.forward_key_to_export(estimator)
    training_input_fn = lambda: self.my_input_fn(self.training_features, self.training_targets[target_name], batch_size=batch_size)
    estimator.train(input_fn=training_input_fn, steps=steps)
    self.estimator = estimator
    print("training has been completed")
def export_model(self):
    """Export the trained SavedModel to GCS, clearing any previous export first.

    Returns:
        str: the final GCS directory of the exported model.
    """
    storage_client = storage.Client(project=self.project_id)
    try:
        bucket_name = storage_client.create_bucket(self.gcs_bucket_name)
    except Exception:
        # Bucket already exists (or creation forbidden): reuse it.
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        bucket_name = storage_client.get_bucket(self.gcs_bucket_name)
    # NOTE(review): completed_model_dir is a full 'gs://...' URL while
    # list_blobs expects an object-name prefix -- this listing likely
    # matches nothing; confirm the intended cleanup behavior.
    completed_model_files = bucket_name.list_blobs(prefix=self.completed_model_dir)
    for file in completed_model_files:
        file.delete()
    self.completed_model_dir = self.estimator.export_savedmodel(export_dir_base = self.completed_model_dir, serving_input_receiver_fn = self.auto_serving_fn)
    # export_savedmodel returns bytes; strip the b'...' wrapper
    self.completed_model_dir = re.sub("'","",re.sub("b'", "", str(self.completed_model_dir)))
    print("Model has been exported to {}".format(self.completed_model_dir))
    return self.completed_model_dir
def validate(self, validation_data_sql):
self.validation_data = gbq.read_gbq(validation_data_sql, self.project_id, dialect='standard')
self.validation_features = self.validation_data[:, self.validation_data.columns != self.target_name]
self.validation_targets = self.validation_data[:, self.validation_data.columns == self.target_name]
training_predictions = self.estimator.predict(input_fn=self.predict_training_input_fn)
validation_predictions = self.estimator.predict(input_fn=self.predict_validation_input_fn)
if self.is_classification:
training_predictions = [item for item in training_predictions]
training_predictions = pd.concat([pd.DataFrame(np.array([item['class_ids'][0] for item in training_predictions]))], axis=1)
training_predictions.columns = ['class_ids','classes','logistic','logits','probabilities']
training_predictions['label'] = pd.DataFrame(np.array(self.training_targets[self.target_name]))
validation_predictions = [item for item in validation_predictions]
validation_predictions = pd.concat([pd.DataFrame( | np.array([item['class_ids'][0] for item in validation_predictions]) | numpy.array |
# -*- coding: utf-8 -*-#
"""
File: Auto_Loan_ML.py
Author: <NAME>
Date: 3/15/20
Desc: Analysis of GM Financial Consumer Automobile Receivables Trust Data Tape
Prediction of Delinquency via Tree-Based Feature Importance Methods
"""
""" ======================= Import dependencies ========================== """
import numpy as np
import pandas as pd
import os
import glob
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as sm
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import accuracy_score, classification_report
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
""" ====================== Function definitions ========================== """
def getIndexes(dfObj, value):
    """Return every (row, column) position in dfObj whose cell equals value.

    Positions are listed column by column, in the DataFrame's column
    order, then by row index within each column.
    """
    # boolean mask: True where the cell holds `value`
    mask = dfObj.isin([value])
    positions = []
    for col in mask.columns[mask.any()]:
        for row in mask.index[mask[col]]:
            positions.append((row, col))
    return positions
def plot_feature_importances(feature_importances, title, feature_names):
# Normalize the importance values
feature_importances = 100.0 * (feature_importances / max(feature_importances))
# Sort the index values and flip them so that they are arranged in decreasing order of importance
index_sorted = np.flipud( | np.argsort(feature_importances) | numpy.argsort |
import time
import tensorflow as tf
import numpy as np
from gcn.utils import *
from gcn.models import GCN, MLP, BCP
# Seed NumPy's global RNG so runs of this script are reproducible.
seed = 123
np.random.seed(seed)
import numpy as np
from scipy.sparse import lil_matrix
from scipy import stats
from joblib import Parallel, delayed
from joblib import load, dump
import tempfile
import shutil
import os
import warnings
from ..processing import knee
warnings.filterwarnings("ignore")
def wgr_BOLD_event_vector(N, matrix, thr, k, temporal_mask):
    """Detect pseudo-events (local peaks) in a BOLD time series.

    A time point t is flagged as an event when its standardised amplitude
    exceeds ``thr[0]`` and it is a strict local maximum over the k samples
    on each side.  (The original docstring mentions an upper bound of 3.1,
    but no such check is implemented in the code.)

    Parameters
    ----------
    N : int
        Number of time points in `matrix`.
    matrix : np.ndarray, shape (N,)
        BOLD time series; standardised internally.
    thr : sequence
        ``thr[0]`` is the amplitude threshold in standard deviations.
    k : int
        Half-width (in samples) of the local-maximum test.
    temporal_mask : array-like of bool
        Samples eligible for detection; an empty mask means "use all".

    Returns
    -------
    scipy.sparse.lil_matrix, shape (1, N)
        1 at each detected event, 0 elsewhere.
    """
    data = lil_matrix((1, N))
    matrix = matrix[:, np.newaxis]  # work on an (N, 1) column vector
    if 0 in np.array(temporal_mask).shape:
        # No mask supplied: z-score the full series (sample std, ddof=1).
        matrix = stats.zscore(matrix, ddof=1)
        matrix = np.nan_to_num(matrix)  # constant series -> z-score is NaN
        for t in range(1 + k, N - k + 1):
            if matrix[t - 1, 0] > thr[0] and \
                    np.all(matrix[t - k - 1:t - 1, 0] < matrix[t - 1, 0]) and \
                    np.all(matrix[t - 1, 0] > matrix[t:t + k, 0]):
                data[0, t - 1] = 1
    else:
        # Standardise using only the masked samples (population std, ddof=0,
        # matching the original behavior).
        datm = np.mean(matrix[temporal_mask])
        datstd = np.std(matrix[temporal_mask])
        # BUG FIX: np.std over the masked samples is a *scalar*, so the
        # original `datstd[datstd == 0] = 1` raised TypeError whenever this
        # branch ran.  Guard against a zero std with a plain comparison.
        if datstd == 0:
            datstd = 1
        matrix = np.divide((matrix - datm), datstd)
        for t in range(1 + k, N - k + 1):
            if temporal_mask[t - 1]:
                if matrix[t - 1, 0] > thr[0] and \
                        np.all(matrix[t - k - 1:t - 1, 0] < matrix[t - 1, 0]) and \
                        np.all(matrix[t - 1, 0] > matrix[t:t + k, 0]):
                    data[0, t - 1] = 1
    return data
def tor_make_deconv_mtx3(sf, tp, eres):
    """Build an FIR deconvolution design matrix from onset stick functions.

    Parameters
    ----------
    sf : dict or np.ndarray
        Stick (onset indicator) functions, one per condition.  A 2-D array
        is converted into a dict of its columns.
    tp : int or sequence
        Number of time points (FIR lags) to model per condition; a scalar
        is broadcast to every condition.
    eres : int
        Down-sampling factor from the stick-function sampling grid to TRs.

    Returns
    -------
    DX : np.ndarray
        Design matrix: one shifted copy of each stick function per lag,
        followed by one intercept column per session.
    sf : dict
        The stick functions, re-binned onto the TR grid.
    """
    docenter = 0  # centering of task regressors is disabled in this port
    # Normalise `sf` to a dict of 1-D column vectors.
    if type(sf) is not dict:
        sf2 = {}
        for i in range(0, sf.shape[1]):
            sf2[i] = sf[:, i]
        sf = sf2
    # Broadcast a scalar lag count to every condition.
    if type(tp) is int:
        tp = np.tile(tp, (1, len(sf)))
    if len(tp) != len(sf):
        print('timepoints vectors (tp) and \
              stick function (sf) lengths do not match!')
        return
    tbefore = 0  # number of pre-onset lags; fixed at 0 here
    nsess = len(sf)
    numtrs = int(np.around(np.amax(sf[0].shape) / eres))  # model length in TRs
    myzeros = np.zeros((numtrs, 1))
    DX = np.zeros((numtrs, 1))
    # Re-bin every stick function from the eres grid onto the TR grid.
    for i in range(0, len(sf)):
        Snumtrs = np.amax(sf[i].shape) / eres
        if(Snumtrs != np.round(Snumtrs)):
            print('length not evenly divisible by eres')
        if(numtrs != Snumtrs):
            print('different length than sf[0]')
        inums = np.nonzero(sf[i] > 0)[0]
        inums = inums / eres
        inums = np.ceil(inums).astype(int)
        sf[i] = np.ravel(myzeros)
        sf[i][inums] = 1
    index = 0
    for i in range(0, len(sf)):
        if tbefore != 0:
            # Pre-onset lags: shift the stick function *earlier* by j TRs.
            for j in range(tbefore - 1, -1, -1):
                sf_temp = sf[i][j:]
                sf_temp = sf_temp[:, np.newaxis]
                mysf = np.concatenate((sf_temp, np.zeros((j, 1))))
                if index == 0:
                    DX[:, index] = np.ravel(mysf)
                else:
                    DX = np.column_stack((DX, mysf))
                index += 1
        # Lag 0: the stick function itself.
        if index == 0:
            DX[:, index] = sf[i]
        else:
            DX = np.column_stack((DX, sf[i]))
        index += 1
        inums = np.nonzero(sf[i] == 1)[0]
        # Lags 1..tp-1: shift the onsets one TR later per lag.
        for j in range(1, np.ravel(tp)[i]):
            myzeros = np.zeros((numtrs, 1))
            inums = inums + 1
            reg = myzeros
            inums = inums[inums < numtrs]  # drop onsets shifted off the end
            reg[inums] = 1
            while (np.amax(reg.shape) < DX.shape[0]):
                # BUG FIX: the original called np.zeros(1, 1); the second
                # positional argument of np.zeros is `dtype`, so that call
                # raised TypeError whenever padding was needed.
                reg = np.concatenate((reg, np.zeros((1, 1))))
            DX = np.column_stack((DX, reg))
            index += 1
    # Append intercept column(s): one per session.
    if nsess < 2:
        DX = np.column_stack((DX, np.ones((DX.shape[0], 1))))
    else:
        X = np.zeros((DX.shape[0], 1))
        index = 0
        scanlen = DX.shape[0] / nsess
        if np.around(scanlen) != scanlen:
            print('Model length is not an even multiple of scan length.')
        for startimg in range(0, DX.shape[0], int(np.around(scanlen))):
            if index == 0:
                X[startimg:startimg + int(np.around(scanlen)), index] = 1
            else:
                X_temp = np.zeros((DX.shape[0], 1))
                X_temp[startimg:startimg + int(np.around(scanlen)), 0] = 1
                X = np.column_stack((X, X_temp))
            index += 1
        DX = np.column_stack((DX, X))
    if docenter:
        # Mean-center the task regressors (session intercepts excluded).
        wh = np.arange(1, DX.shape[1] - nsess + 1)
        DX[:, wh] = DX[:, wh] - np.tile(np.mean(DX[:, wh]), (DX.shape[0], 1))
    return DX, sf
def Fit_sFIR2(tc, TR, Runs, T, mode):
DX, sf = tor_make_deconv_mtx3(Runs, T, 1)
DX2 = DX[:, 0:T]
num = T
if mode == 1:
C = np.arange(1, num + 1).reshape((1, num)).conj().T\
.dot(np.ones((1, num)))
h = | np.sqrt(1 / (7 / TR)) | numpy.sqrt |
# -*- coding: utf-8 -*-
""" invdisttree.py: inverse-distance-weighted interpolation using KDTree
fast, solid, local
"""
from __future__ import division
import numpy as np
from scipy.spatial import cKDTree as KDTree
from shapely.geometry import LineString
# http://docs.scipy.org/doc/scipy/reference/spatial.html
__date__ = "2010-11-09 Nov" # weights, doc
class Invdisttree:
""" Inverse-distance-weighted interpolation using KDTree
Examples
--------
>>> invdisttree = Invdisttree( X, z ) -- data points, values
>>> interpol = invdisttree( q, nnear=3, eps=0, p=1, weights=None, stat=0 )
# interpolates z from the 3 points nearest each query point q;
>>> interpol(q)
Finds the 3 data points nearest point q, at distances d1 d2 d3
and returns the IDW average of the values z1 z2 z3
.. math:: (z1/d1 + z2/d2 + z3/d3) / (1/d1 + 1/d2 + 1/d3) = .55 z1 + .27 z2 + .18 z3
How many nearest neighbors should one take?
1. start with 8 11 14 .. 28 in 2d 3d 4d .. 10d; see Wendel's formula
2. make 3 runs with nnear= e.g. 6 8 10, and look at the results
There is also a parameter p that weights nearer points more, farther points less.
In 2d, the circles around query points have :math:`areas ~ distance^2`
So p=2 is essentially inverse-area weighting:
.. math::
(z1/area1 + z2/area2 + z3/area3)/ (1/area1 + 1/area2 + 1/area3) = .74 z1 + .18 z2 + .08 z3
Notes
-----
If the components of the X coordinates measure different things, Euclidean distance
can be way off. For example, if X0 is in the range 0 to 1
but X1 0 to 1000, the X1 distances will swamp X0;
rescale the data, i.e. make X0.std() ~= X1.std() .
"""
    def __init__( self, X, z, leafsize=10, stat=0 ):
        """Constructor using coordinates and data.

        Builds the KD-tree once up front; queries are then served by
        ``__call__``.

        Parameters
        ----------
        X : np.ndarray
            Coordinates of data points
        z : np.ndarray
            Data values at data points (same length as X)
        leafsize: int
            tree parameter (points per KD-tree leaf)
        stat : bool
            accumulate wsum, wn for average weights
        """
        # X and z must pair up one-to-one.
        assert len(X) == len(z), "len(X) %d != len(z) %d" % (len(X), len(z))
        self.tree = KDTree( X, leafsize=leafsize )  # build the tree
        self.x = X
        self.z = z
        self.stat = stat
        # Accumulators for average-weight statistics; wsum is lazily
        # allocated on the first __call__ (its size depends on nnear).
        self.wn = 0
        self.wsum = None;
def __call__( self, q, nnear=6, eps=0, p=1, weights=None, gridboundary=None ):
""" Apply the interpolator to find nearest neighbors of each query point
Parameters
----------
q : Nx2
Destination points. may be one point, or a batch of points.
eps: float
approximate nearest, dist <= (1 + eps) * true nearest
p: float
power for decay with distance `
weights: float
optional multipliers for :math:`1 / distance^p`, of the same shape as q
gridboundary: True
avoid crossing land
Returns:
Interpolated values
"""
if gridboundary is not None: raise NotImplementedError("not implemented with gridboundary option")
q = np.asarray(q)
qdim = q.ndim
if qdim == 1:
q = np.array([q])
if self.wsum is None:
self.wsum = np.zeros(nnear)
# The reason we use KDTree here is for faster computation and also
# we want to find the nearest N points and their indices.
self.distances, self.ix = self.tree.query( q, k=nnear, eps=eps )
interpol = np.zeros( (len(self.distances),) + | np.shape(self.z[0]) | numpy.shape |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import numpy as np
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _OptimizerOptions():
  """Yields the 8 ConfigProtos covering every on/off combination of CSE,
  function inlining, and constant folding (all at optimizer level L0)."""
  toggles = (False, True)
  for do_cse in toggles:
    for do_inline in toggles:
      for do_fold in toggles:
        opts = config_pb2.OptimizerOptions(
            opt_level=config_pb2.OptimizerOptions.L0,
            do_common_subexpression_elimination=do_cse,
            do_function_inlining=do_inline,
            do_constant_folding=do_fold)
        yield config_pb2.ConfigProto(
            graph_options=config_pb2.GraphOptions(optimizer_options=opts))
@test_util.with_c_api
class FunctionTest(test.TestCase):
"""Test methods for verifying Function support.
These test methods are used as mix-ins in two test cases: with
and without C API support.
"""
def testIdentity(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], sess.run(call))
def testIdentityOutputName(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity", out_names=["my_result_name"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], sess.run(call))
def testTooManyOutputNames(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity",
out_names=["my_result1", "my_result2"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
(r"output names must be either empty or equal in size to outputs. "
"output names size = 2 outputs size = 1")):
MyIdentityFunc([18.0])
def testDefineFunction2Args(self):
@function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B")
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testFunctionWithNoOutput(self):
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
c = a + b * 2 # Create some ops to have nodes in the body
print(c) # Using 'print' to make lint happy
with ops.Graph().as_default():
# Call function. There should be no exceptions.
APlus2B([1.0], [2.0])
def testDefineFunction2ArgsOutputName(self):
@function.Defun(
dtypes.float32,
dtypes.float32,
func_name="APlus2B",
out_names=["my_result_name"])
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testDefineFunctionDuplicateOutputs(self):
@function.Defun(dtypes.float32, func_name="Duplicate")
def Duplicate(a):
b = a + 1.0
return b, b
g = ops.Graph()
with g.as_default():
Duplicate([3.0])
func_sig = g.as_graph_def().library.function[0].signature
# The names given to both outputs should be different
# even though the same tensor is emitted to both.
out_names = [a.name for a in func_sig.output_arg]
self.assertEqual(2, len(out_names))
self.assertNotEqual(out_names[0], out_names[1])
def testGradientFunc(self):
@function.Defun(dtypes.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops._symbolic_gradient(
input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = ops.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with session.Session() as sess:
self.assertAllClose([5.0], sess.run(call_f))
self.assertAllClose([0.4], sess.run(call_g))
def testTanhSymGrad(self):
@function.Defun(dtypes.float32)
def Forward(x):
return math_ops.reduce_sum(math_ops.tanh(x))
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx = gradients_impl.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True)))
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
  def testCustomGradient(self):
    """Checks that `grad_func` replaces the default gradient of a Defun."""
    dtype = dtypes.float32
    @function.Defun(dtype, dtype, dtype)
    def XentLossGrad(logits, labels, dloss):
      dlogits = array_ops.reshape(dloss, [-1, 1]) * (
          nn_ops.softmax(logits) - labels)
      dlabels = array_ops.zeros_like(labels)
      # Takes exp(dlogits) to differentiate it from the "correct" gradient.
      return math_ops.exp(dlogits), dlabels
    @function.Defun(dtype, dtype, grad_func=XentLossGrad)
    def XentLoss(logits, labels):
      return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
                                 1)
    g = ops.Graph()
    with g.as_default():
      logits = array_ops.placeholder(dtype)
      labels = array_ops.placeholder(dtype)
      loss = XentLoss(logits, labels)
      dlogits = gradients_impl.gradients([loss], [logits])
    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    # Softmax of x, computed in NumPy as the reference value.
    prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
    y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    for cfg in _OptimizerOptions():
      tf_logging.info("cfg = %s", cfg)
      with session.Session(graph=g, config=cfg) as sess:
        out, = sess.run(dlogits, {logits: x, labels: y})
        # With default grad_ys of 1, the custom grad yields
        # exp(softmax(x) - y); seeing that value proves it was used.
        self.assertAllClose(out, np.exp(prob - y))
def testCustomGradientError(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtype)
out = math_ops.add_n(Forward(inp))
dinp = gradients_impl.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, [25, 4])
y = array_ops.placeholder(dtypes.float32, [200, 100])
dz = array_ops.placeholder(dtypes.float32, [1])
# We assume Foo is a function of (x, y) -> (z) Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops._symbolic_gradient(
input=[x, y, dz], Tout=[dtypes.float32] * 2, f="Foo")
self.assertEqual(x.get_shape(), dx.get_shape())
self.assertEqual(y.get_shape(), dy.get_shape())
def testSymGradAttr(self):
@function.Defun(noinline=True)
def Foo(x):
return x * 2
self.assertTrue(
Foo.instantiate([dtypes.float32]).definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(3.0)
y = Foo(x)
dx, = gradients_impl.gradients(y, [x])
cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
with self.test_session(graph=g, config=cfg):
self.assertAllClose(y.eval(), 6.)
self.assertAllClose(dx.eval(), 2.)
  def _testZNoDepOnY(self, use_const_grad_ys):
    """Checks that d(z)/dy is 0 when the Defun ignores its 2nd argument."""
    @function.Defun(dtypes.float32, dtypes.float32)
    def Foo(x, y):  # pylint: disable=unused-argument
      return x * 2
    with ops.Graph().as_default():
      # z = Foo(x, y); z does not depend on y.
      x = constant_op.constant(1.0)
      y = constant_op.constant(2.0)
      z = Foo(x, y)
      if use_const_grad_ys:
        # Explicit constant grad_ys exercises constant folding of grad_ys.
        dx, dy = gradients_impl.gradients([z], [x, y], grad_ys=[1.0])
      else:
        dx, dy = gradients_impl.gradients([z], [x, y])
      with session.Session() as sess:
        dx_val, dy_val = sess.run([dx, dy])
        self.assertEqual([2.0], dx_val)
        self.assertEqual([0.0], dy_val)
def testZNoDepOnY(self):
self._testZNoDepOnY(False)
def testZNoDepOnYConstGradYs(self):
# Tests for constant folding of grad_ys
self._testZNoDepOnY(True)
def testDefineFunctionNoArgs(self):
@function.Defun(func_name="AConstant")
def AConstant():
return constant_op.constant([42])
with ops.Graph().as_default():
call = AConstant()
self.assertEqual("AConstant", call.op.name)
with session.Session() as sess:
self.assertAllEqual([42], sess.run(call))
def testDefineFunctionNames(self):
@function.Defun(dtypes.float32, func_name="Foo")
def Foo(a):
return a + 1
with ops.Graph().as_default():
call1 = Foo([1.0])
self.assertEqual("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEqual("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEqual("mine", call3.op.name)
with ops.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEqual("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
y = logging_ops.Print(x, [], "Hello")
with ops.control_dependencies([y]):
z = control_flow_ops.no_op()
with ops.control_dependencies([z]):
return x * 2
with ops.Graph().as_default(), self.test_session():
z = Foo(constant_op.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssertOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
check = gen_logging_ops._assert(math_ops.greater(x, 0), [x])
with ops.control_dependencies([check]):
return x * 2
g = ops.Graph()
with g.as_default(), self.test_session():
self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
@test_util.disable_c_api # Op._add_control_inputs doesn't work with C API
def testAssertWrapper(self):
@function.Defun(dtypes.float32)
def MyFn(x):
with ops.control_dependencies(
[control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]):
return array_ops.identity(x)
with self.test_session():
self.assertEqual(1.0, MyFn(1.0).eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
_ = MyFn(100.0).eval()
@test_util.disable_c_api # Op._add_control_inputs doesn't work with C API
def testWhileLoopCallsFunc(self):
with self.test_session(use_gpu=True) as sess:
@function.Defun(dtypes.float32)
def Times2(x):
constant_two = constant_op.constant(2, dtypes.int32)
two_on_gpu = math_ops.cast(constant_two, dtypes.float32)
return x * two_on_gpu
def Body(x):
x2 = Times2(x)
x2.set_shape([])
return x2
loop = control_flow_ops.while_loop(lambda x: x < 1e5, Body, [1.0])
ans = sess.run(loop)
self.assertAllClose(ans, 131072.)
@test_util.disable_c_api # Op._add_control_inputs doesn't work with C API
def testControlFlowStrictness(self):
"""Inlined functions must not execute in a untaken control flow branch."""
@function.Defun(dtypes.int32)
def AssertFail(x):
# Assertion that always fails and does not have a data dependency on `x`.
assert_false = control_flow_ops.Assert(False, [42])
with ops.control_dependencies([assert_false]):
return array_ops.identity(x)
with ops.device("CPU"):
pred = array_ops.placeholder(dtypes.bool)
x = array_ops.placeholder(dtypes.int32)
cond = control_flow_ops.cond(pred, lambda: x + 1, lambda: AssertFail(x))
# pylint: disable=unnecessary-lambda
loop = control_flow_ops.while_loop(lambda y: pred,
lambda y: AssertFail(y), [x])
# pylint: enable=unnecessary-lambda
# Enables inlining.
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
with session.Session(config=config) as sess:
# Since the 'False' branch is not taken, the assertion should not fire.
self.assertEqual(4, sess.run(cond, {pred: True, x: 3}))
# The assertion should still fire if the False branch is taken.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(cond, {pred: False, x: 3})
# Similarly for loops.
self.assertEqual(3, sess.run(loop, {pred: False, x: 3}))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(loop, {pred: True, x: 3})
def testVar(self):
@function.Defun(dtypes.float32)
def Foo(x):
return x * x + 1
g = ops.Graph()
with g.as_default():
v = variables.Variable(constant_op.constant(10.0))
z = Foo(v)
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(z.eval(), 101.)
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
v = variable_scope.get_variable(
"var", (4, 4), dtypes.float32, use_resource=True)
@function.Defun()
def Foo():
return array_ops.identity(v)
y = v.value()
z = Foo()
with self.test_session(graph=g):
v.initializer.run()
self.assertAllEqual(y.eval(), z.eval())
def testDefineErrors(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "can not return None"):
@function.Defun()
def TwoNone():
return None, None
_ = TwoNone.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return constant_op.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return constant_op.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
@function.Defun()
def Const():
return constant_op.constant(1)
@function.Defun(dtypes.int32)
def PlusOne(a):
return a + 1
@function.Defun(dtypes.int32, dtypes.int32)
def PlusMinus(a, b):
return a + b, b - a
with ops.Graph().as_default():
_ = Const()
# pylint: disable=too-many-function-args
# pylint: disable=unexpected-keyword-arg
# pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne()
_ = PlusOne(1)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus()
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus(1)
_ = PlusMinus(1, 2)
_ = PlusOne(1, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
_ = PlusOne(1, device="/device:GPU:0")
def testFunctionDecorator(self):
@function.Defun(dtypes.float32, func_name="Minus1")
def Minus1(b):
return b - 1.0
with ops.Graph().as_default():
call1 = Minus1([2.])
self.assertTrue(isinstance(Minus1, function._DefinedFunction))
self.assertEqual(Minus1.name, "Minus1")
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint: enable=unexpected-keyword-arg
self.assertEqual("next", call2.op.name)
with session.Session() as sess:
self.assertAllEqual([1], sess.run(call1))
self.assertAllEqual([0], sess.run(call2))
def testNestedFunction(self):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
def testNestedDefinedFunction(self):
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
invoked = False
# pylint: disable=unused-variable
@function.Defun()
def Unused():
invoked = True
return constant_op.constant(42.)
self.assertFalse(invoked)
g = ops.Graph()
with g.as_default():
@function.Defun()
def Unused2():
invoked = True
return constant_op.constant(7.)
constant_op.constant(3.)
# pylint: enable=unused-variable
self.assertFalse(invoked)
gdef = g.as_graph_def()
self.assertEqual(0, len(gdef.library.function))
def testReduction(self):
g = ops.Graph()
# BN0 is computing batch normed matrix along rows.
def BN0(x):
mean = math_ops.reduce_mean(x, [0])
var = math_ops.reduce_mean(math_ops.square(x - mean)) # biased var
rstd = math_ops.rsqrt(var + 1e-8)
return (x - mean) * rstd
# Wraps BatchNorm in a tf function.
@function.Defun(dtypes.float32)
def BN1(x):
return BN0(x)
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y0 = BN0(x) # A plain graph
y1 = BN1(x) # A tf function
dx0, = gradients_impl.gradients([y0], [x])
dx1, = gradients_impl.gradients([y1], [x])
# Both should produce the same result and gradient.
with self.test_session(graph=g) as sess:
vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
self.assertAllClose(vals[0], vals[1])
self.assertAllClose(vals[2], vals[3])
  def testCapture(self):
    """Checks that Defun bodies (even nested ones) can capture variables
    defined in the enclosing graph."""
    g = ops.Graph()
    with g.as_default():
      w = variables.Variable(constant_op.constant([[1.0]]))
      b = variables.Variable(constant_op.constant([2.0]))
      # Foo() captures w and b.
      @function.Defun(dtypes.float32)
      def Foo(x):
        # Plus() captures b.
        @function.Defun(dtypes.float32)
        def Plus(y):
          return y + b
        return Plus(math_ops.matmul(w, x))
      y = Foo(constant_op.constant([[10.]]))
    with self.test_session(graph=g):
      variables.global_variables_initializer().run()
      # 1.0 * 10.0 + 2.0 == 12.0
      self.assertAllEqual(y.eval(), [[12.0]])
def testCaptureControls(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([10.0])
x = logging_ops.Print(x, [x], "outer")
@function.Defun(dtypes.float32)
def Foo(y):
with ops.control_dependencies([x]):
y = logging_ops.Print(y, [y], "inner")
return y
with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
# NOTE: We still do not support capturing control deps.
_ = Foo(x)
def testCaptureInWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun()
def Foo():
return control_flow_ops.while_loop(lambda i: i < 10,
lambda i: i + x,
[0])
y = Foo()
with self.test_session(graph=g) as sess:
self.assertEqual(sess.run(y), 10)
def testCaptureInCond(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun(dtypes.bool)
def Foo(pred):
return control_flow_ops.cond(pred,
lambda: x,
lambda: x + 1)
y = Foo(True)
z = Foo(False)
with self.test_session(graph=g) as sess:
self.assertEqual(sess.run(y), 1)
self.assertEqual(sess.run(z), 2)
def testStableName(self):
@function.Defun()
def Foo(x, y, z):
return math_ops.tanh(math_ops.matmul(x, y) + z)
# We added more randomness to function names in C API.
# TODO(iga): Remove this if statement when we switch to C API.
if ops._USE_C_API: # pylint: disable=protected-access
self.assertEqual("Foo_aCYSbwBkR5A",
Foo.instantiate([dtypes.float32] * 3).name)
else:
self.assertEqual("Foo_d643acf7",
Foo.instantiate([dtypes.float32] * 3).name)
def testSignatureHash(self):
# Foo.Inner and Bar.Inner have identical function body but have
# different signatures. They should be treated as two different functions.
@function.Defun()
def Foo(x):
@function.Defun()
def Inner(x):
return x + 10.
return Inner(x)
@function.Defun()
def Bar(x):
@function.Defun()
def Inner(x, unused_y, unused_z):
return x + 10.
return Inner(x, 2., 3.)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(10.0)
y = Foo(x)
z = Bar(x)
with self.test_session(graph=g) as sess:
v0, v1 = sess.run([y, z])
self.assertAllEqual(v0, 20.)
self.assertAllEqual(v1, 20.)
def testShapeFunction(self):
@function.Defun(
dtypes.float32, shape_func=lambda op: [op.inputs[0].get_shape()])
def Foo(x):
return x + 1.0
@function.Defun(
shape_func=lambda op: [[1] + op.inputs[0].get_shape().as_list()])
def Bar(x):
return array_ops.stack([x])
g = ops.Graph()
with g.as_default():
x = Foo([1.0, 2.0])
self.assertEqual(x.get_shape().as_list(), [2])
y = Bar(array_ops.zeros([1, 2, 3]))
self.assertAllEqual(y.get_shape().as_list(), [1, 1, 2, 3])
def testVariableReuse(self):
def LinearWithReuse(input_tensor, reuse=None):
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=reuse):
w = variable_scope.get_variable(
"w", shape=[size, size], dtype=input_tensor.dtype)
return math_ops.matmul(input_tensor, w)
@function.Defun(dtypes.float32)
def Foo(inputs):
inputs = array_ops.reshape(inputs, [32, 100])
hidden = LinearWithReuse(inputs)
return LinearWithReuse(hidden, reuse=True)
input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32)
output_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "linear/w:0")
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
output_val = sess.run(
output_op, feed_dict={input_op: np.random.rand(32, 100)})
self.assertEqual(output_val.shape, (32, 100))
def testFunctionCallInDifferentVariableScopes(self):
@function.Defun(dtypes.float32)
def Foo(inputs):
var = variable_scope.get_variable(
"var",
shape=[10],
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
return inputs + var
input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32)
with variable_scope.variable_scope("vs1"):
out1_op = Foo(input_op)
with variable_scope.variable_scope("vs2"):
out2_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "vs1/var:0")
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
out1, out2 = sess.run(
[out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)})
self.assertAllEqual(out1, np.linspace(2, 11, 10))
self.assertAllEqual(out2, np.linspace(2, 11, 10))
def testTwoInputsSameOp(self):
g = ops.Graph()
with g.as_default():
m = array_ops.placeholder(dtypes.float32)
s, u, v = linalg_ops.svd(m)
ss = math_ops.reduce_sum(s)
uu = math_ops.reduce_sum(u)
vv = math_ops.reduce_sum(v)
result = ss + uu + vv
f = graph_to_function_def.graph_to_function_def(
g,
g.get_operations()[1:], # skip the placeholder
[s, u, v],
[result])
self.assertEqual(len(f.signature.input_arg), 3)
def testGradientWithIntegerFunctionArgument(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtypes.float32)
t = constant_op.constant(0, dtypes.int32)
out = Foo(t, inp)
dinp, = gradients_impl.gradients(out, [inp])
x = np.zeros((2,)).astype(np.float32)
with session.Session(graph=g) as sess:
self.assertAllClose(
np.array([1.0, 0.0]).astype(np.float32),
sess.run(dinp, {inp: x}))
  def testStatefulFunction(self):
    """`is_stateful` reflects the ops in a Defun's body and its callees."""

    @function.Defun()
    def FunctionWithStatelessOp():
      return constant_op.constant(42.0)

    @function.Defun()
    def FunctionWithStatefulOp():
      # random_uniform is a stateful op.
      return random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)

    @function.Defun()
    def FunctionWithStatelessFunctionCall():
      return FunctionWithStatelessOp()

    @function.Defun()
    def FunctionWithStatefulFunctionCall():
      return FunctionWithStatefulOp()

    # Test that the `is_stateful` bit is propagated.
    self.assertFalse(FunctionWithStatelessOp.definition.signature.is_stateful)
    self.assertTrue(FunctionWithStatefulOp.definition.signature.is_stateful)
    self.assertFalse(
        FunctionWithStatelessFunctionCall.definition.signature.is_stateful)
    self.assertTrue(
        FunctionWithStatefulFunctionCall.definition.signature.is_stateful)

    # Ensure that two invocations of the same random-number-generating
    # function produce different results.
    result1 = FunctionWithStatefulFunctionCall()
    result2 = FunctionWithStatefulFunctionCall()

    # Statefulness affects how the function is treated by the various
    # optimization passes, so run the test in each optimizer
    # configuration.
    for config in _OptimizerOptions():
      with session.Session(config=config) as sess:
        val1, val2 = sess.run((result1, result2))
        self.assertFalse(all(val1 == val2))
        val3, val4 = sess.run((result1, result2))
        self.assertFalse(all(val3 == val1))
        self.assertFalse(all(val4 == val2))
@test_util.with_c_api
class FunctionsFromProtos(test.TestCase):
  """Tests round-tripping defined functions through FunctionDef/GradientDef protos."""

  def expectFunctionsEqual(self, func, grad_func=None, new_func=None):
    """Asserts `new_func` is equivalent to `func`.

    If `new_func` is None, it is produced by serializing `func.definition`
    to a FunctionDef proto and rebuilding a function from it, with
    `grad_func` attached during deserialization.
    """
    if new_func is None:
      # Make a copy of func.definition to avoid any bugs masked by using the
      # same object
      serialized_fdef = func.definition.SerializeToString()
      # Serialize and then deserialize `func` to create `new_func`
      fdef = function_pb2.FunctionDef.FromString(serialized_fdef)
      new_func = function._from_definition(fdef, grad_func=grad_func)
    self.assertEqual(func.name, new_func.name)
    self.assertEqual(func.definition, new_func.definition)
    self.assertEqual(func.grad_func_name, new_func.grad_func_name)
    self.assertEqual(func.declared_input_types, new_func.declared_input_types)
    self.assertEqual(func.captured_inputs, new_func.captured_inputs)

  def testBasic(self):
    """A simple two-argument function round-trips unchanged."""

    @function.Defun(dtypes.float32, dtypes.float32)
    def Foo(x, y):
      return x + y

    self.expectFunctionsEqual(Foo)

  def testGradFunc(self):
    """A function with an attached gradient function round-trips."""

    @function.Defun(dtypes.float32, dtypes.float32)
    def G(x, dy):
      return x * dy

    @function.Defun(dtypes.float32, grad_func=G)
    def F(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    self.expectFunctionsEqual(F, grad_func=G)

  def testCapturedInputs(self):
    """Captured tensors become regular inputs after deserialization."""
    c = constant_op.constant(10, dtypes.int64)

    @function.Defun(dtypes.int64)
    def Foo(x):
      # `c` is captured from the enclosing graph.
      return x + c

    new_func = function._from_definition(Foo.definition)

    self.assertEqual(Foo.name, new_func.name)
    self.assertEqual(Foo.definition, new_func.definition)
    self.assertEqual(Foo.grad_func_name, new_func.grad_func_name)

    # Captured inputs are added as regular inputs to the function definition
    self.assertEqual(new_func.declared_input_types,
                     Foo.declared_input_types + (dtypes.int64,))
    self.assertEqual(len(new_func.captured_inputs), 0)

  def testNestedFunctions(self):
    """A function that defines and calls an inner function round-trips."""

    @function.Defun(dtypes.float32)
    def Outer(x):

      @function.Defun(dtypes.float32)
      def Inner(y):
        return y + 1

      return Inner(Inner(x))

    self.expectFunctionsEqual(Outer)

  def testFromLibrary(self):
    """_from_library reconstructs every function with its gradient links."""
    # Define some functions with different gradient functions. Note that many of
    # the below functions are identical since function bodies don't matter for
    # this test.

    @function.Defun(dtypes.float32, dtypes.float32)
    def G1(x, dy):
      return x * dy

    @function.Defun(dtypes.float32, dtypes.float32)
    def G2(x, dy):
      return x * dy

    # F1 and F2 have the same gradient function
    @function.Defun(dtypes.float32, grad_func=G1)
    def F1(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    @function.Defun(dtypes.float32, grad_func=G1)
    def F2(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # F3 has a different gradient function
    @function.Defun(dtypes.float32, grad_func=G2)
    def F3(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # F4 has no gradient function
    @function.Defun(dtypes.float32)
    def F4(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # Instantiate all functions
    g = ops.Graph()
    with g.as_default():
      c = constant_op.constant(1.0, dtypes.float32)
      f1 = F1(c)
      f2 = F2(c)
      f3 = F3(c)
      f4 = F4(c)
      gradients_impl.gradients([f1, f2, f3, f4], c)

    library = g.as_graph_def().library
    new_funcs = function._from_library(library)

    def CheckNewFunc(func):
      # Exactly one reconstructed function must match by name.
      new_func = [f for f in new_funcs if f.name == func.name]
      self.assertEqual(len(new_func), 1)
      self.expectFunctionsEqual(func, new_func=new_func[0])

    CheckNewFunc(G1)
    CheckNewFunc(G2)
    CheckNewFunc(F1)
    CheckNewFunc(F2)
    CheckNewFunc(F3)
    CheckNewFunc(F4)

  def testFromLibraryEmptyLib(self):
    """An empty FunctionDefLibrary yields an empty result."""
    library = function_pb2.FunctionDefLibrary()
    self.assertEqual(len(function._from_library(library)), 0)

  def testFromLibraryMissingFuncDef(self):
    """_from_library raises when a GradientDef references a missing FunctionDef."""

    @function.Defun(dtypes.float32, dtypes.float32)
    def G1(x, dy):
      return x * dy

    @function.Defun(dtypes.float32)
    def F1(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    gradient = function_pb2.GradientDef()
    gradient.function_name = F1.name
    gradient.gradient_func = G1.name

    # Create invalid function def that is missing G1 function def
    library = function_pb2.FunctionDefLibrary()
    library.gradient.extend([gradient])
    library.function.extend([F1.definition])

    with self.assertRaisesRegexp(
        ValueError,
        "FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
      function._from_library(library)

    # Create invalid function def that is missing F1 function def
    library = function_pb2.FunctionDefLibrary()
    library.gradient.extend([gradient])
    library.function.extend([G1.definition])

    with self.assertRaisesRegexp(
        ValueError,
        "FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
      function._from_library(library)

  def testFromLibraryCyclicGradFuncs(self):
    """_from_library raises when gradient links form a cycle."""

    @function.Defun(dtypes.float32)
    def F1(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    @function.Defun(dtypes.float32)
    def F2(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # Create invalid function def library where F1 has gradient function F2 and
    # F2 has gradient function F1
    library = function_pb2.FunctionDefLibrary()
    library.function.extend([F1.definition, F2.definition])

    gradient1 = function_pb2.GradientDef()
    gradient1.function_name = F1.name
    gradient1.gradient_func = F2.name

    gradient2 = function_pb2.GradientDef()
    gradient2.function_name = F2.name
    gradient2.gradient_func = F1.name

    library.gradient.extend([gradient1, gradient2])

    with self.assertRaisesRegexp(
        ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
      function._from_library(library)
@test_util.with_c_api
class FunctionOverloadTest(test.TestCase):
def testBasic(self):
@function.Defun()
def Sinh(x):
return 1 / 2. * (math_ops.exp(x) - math_ops.exp(-x))
g = ops.Graph()
with g.as_default():
x = Sinh(constant_op.constant(0.25, dtypes.float32))
y = Sinh(constant_op.constant(0.25, dtypes.float64))
with self.test_session(graph=g):
self.assertAllClose(x.eval(), np.sinh(0.25))
self.assertAllClose(y.eval(), | np.sinh(0.25) | numpy.sinh |
import numpy as np
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import (
AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor)
from sklearn.linear_model import Lasso, OrthogonalMatchingPursuit
from rootcp import models
from sklearn.datasets import make_regression
from sklearn import datasets
import intervals
def oracleCP(X, y, model, alpha=0.1):
    """Oracle conformal prediction interval for the last row of X.

    Fits the model on the full data, uses the (1 - alpha) quantile of the
    in-sample conformity scores as the interval half-width, and centres the
    interval on the model's prediction for X[-1].

    Returns a [lower, upper] pair of bounds.
    """
    model.fit(X, y)
    scores = model.conformity(y, model.predict(X))
    half_width = np.quantile(scores, 1 - alpha)
    center = model.predict(X[-1, :])
    return [center - half_width, center + half_width]
def splitCP(X, y, model, alpha=0.1):
    """Split conformal prediction interval for the last row of X.

    The first n-1 rows are split 50/50 into a fitting half and a
    calibration half; the sorted conformity scores on the calibration half
    provide the half-width around the model's prediction for X[-1].

    NOTE(review): `rank` is not offset by -1 even though numpy arrays are
    0-indexed (the original author flagged the same doubt) -- confirm the
    intended ranking convention before relying on exact coverage.
    """
    fit_X, cal_X, fit_y, cal_y = train_test_split(X[:-1], y, test_size=0.5)
    model.fit(fit_X, fit_y)
    # Ranking on the calibration set.
    cal_scores = np.sort(model.conformity(cal_y, model.predict(cal_X)))
    rank = int((X.shape[0] / 2 + 1) * (1 - alpha))
    quantile = cal_scores[rank]
    center = model.predict(X[-1, :])
    return [center - quantile, center + quantile]
def splitCPP(X, y, model, alpha=0.1):
    """Split conformal prediction recentred on a refit prediction.

    Starts from the splitCP interval; when the prediction of a model refit
    on the augmented data falls inside it, the interval is re-centred on
    that prediction (keeping the larger one-sided distance as half-width).
    Otherwise the original splitCP interval is returned unchanged.
    """
    interval = splitCP(X, y, model, alpha)
    lo, hi = interval[0], interval[1]
    mu = 0.5 * (lo + hi)
    # Augment y with a placeholder 0 for the candidate point X[-1].
    y_augmented = np.array(list(y) + [0])
    model.fit(X, y_augmented)
    y_pred = model.predict(X[-1])
    print("y_pred =", y_pred, "y_scp =", mu)
    if not (lo <= y_pred and y_pred <= hi):
        return interval
    bound = max(hi - y_pred, y_pred - lo)
    print("ok", hi - lo, bound)
    return [y_pred - bound, y_pred + bound]
def ridgeCP(X, y, lmd, alpha=0.1):
n_samples, n_features = X.shape
H = X.T.dot(X) + lmd * np.eye(n_features)
C = np.eye(n_samples) - X.dot(np.linalg.solve(H, X.T))
A = C.dot(list(y) + [0])
B = C[:, -1]
negative_B = np.where(B < 0)[0]
A[negative_B] *= -1
B[negative_B] *= -1
S, U, V = [], [], []
for i in range(n_samples):
if B[i] != B[-1]:
tmp_u_i = (A[i] - A[-1]) / (B[-1] - B[i])
tmp_v_i = -(A[i] + A[-1]) / (B[-1] + B[i])
u_i, v_i = np.sort([tmp_u_i, tmp_v_i])
U += [u_i]
V += [v_i]
elif B[i] != 0:
tmp_uv = -0.5 * (A[i] + A[-1]) / B[i]
U += [tmp_uv]
V += [tmp_uv]
if B[-1] > B[i]:
S += [intervals.closed(U[i], V[i])]
elif B[-1] < B[i]:
intvl_u = intervals.openclosed(-np.inf, U[i])
intvl_v = intervals.closedopen(V[i], np.inf)
S += [intvl_u.union(intvl_v)]
elif B[-1] == B[i] and B[i] > 0 and A[-1] < A[i]:
S += [intervals.closedopen(U[i], np.inf)]
elif B[-1] == B[i] and B[i] > 0 and A[-1] > A[i]:
S += [intervals.openclosed(-np.inf, U[i])]
elif B[-1] == B[i] and B[i] == 0 and abs(A[-1]) <= abs(A[i]):
S += [intervals.open(-np.inf, np.inf)]
elif B[-1] == B[i] and B[i] == 0 and abs(A[-1]) > abs(A[i]):
S += [intervals.empty()]
elif B[-1] == B[i] and A[-1] == A[i]:
S += [intervals.open(-np.inf, np.inf)]
else:
print("boom !!!")
hat_y = np.sort([-np.inf] + U + V + [np.inf])
size = hat_y.shape[0]
conf_pred = intervals.empty()
p_values = | np.zeros(size) | numpy.zeros |
import numpy as np
from scipy.spatial import cKDTree
def triage(scores, spike_index, triage_k,
triage_percent, location_feature):
"""
Triage based on KNN distance.
It removes triage_percent*100% of data
Parameters
----------
scores: list (n_channels)
A list such that scores[c] contains all scores whose main
channel is c
spike_index: list (n_channels)
A list such that spike_index[c] cointains all spike times
whose channel is c
triage_k: int
number of neighbors to consider
triage_percent: float
percentage of data to be triaged.
It is a number between 0 and 1.
Returns
-------
scores: list (n_channels)
scores after triage
spike_index: list (n_channels)
spike_index after traige
"""
# relevant info
n_channels = np.max(spike_index[:, 1]) + 1
th = (1 - triage_percent)*100
idx_triage = np.zeros(scores.shape[0], 'bool')
for channel in range(n_channels):
idx_data = np.where(spike_index[:, 1] == channel)[0]
scores_channel = scores[
idx_data, :, 0]
nc = scores_channel.shape[0]
if nc > triage_k + 1:
if location_feature:
th = (1 - triage_percent/2)*100
# get distance to nearest neighbors
tree = cKDTree(scores_channel[:, :2])
dist, ind = tree.query(scores_channel[:, :2], k=triage_k + 1)
dist = np.sum(dist, 1)
# triage far ones
idx_triage[idx_data[dist > | np.percentile(dist, th) | numpy.percentile |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
    """A crystallographic space group.

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each a tuple (rot, tn, td) of three integer
                                arrays: the rotation matrix and the
                                numerator/denominator of the translation
                                vector, in fractional coordinates
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Precompute, per transformation, the transposed rotation (applied
        # to Miller indices) and the exponent of the complex phase factor.
        transposed = []
        exponents = []
        for rot, trans_num, trans_den in transformations:
            transposed.append(N.transpose(rot))
            exponents.append((-2j * N.pi * trans_num) / trans_den)
        self.transposed_rotations = N.array(transposed)
        self.phase_factors = N.exp(N.array(exponents))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """Return the symmetry equivalents of a reflection.

        :param hkl: a set of Miller indices
        :return: a tuple (miller_indices, phase_factor), each of length
                 equal to the number of transformations: the Miller indices
                 of every reflection equivalent by symmetry to hkl (hkl
                 itself first), and the phase factors to apply to the
                 structure factor of hkl to obtain each equivalent's
                 structure factor
        :rtype: tuple
        """
        equivalents = N.dot(self.transposed_rotations, hkl)
        phases = N.multiply.reduce(self.phase_factors ** hkl, -1)
        return equivalents, phases
space_groups = {}

# Flat (row-major) 3x3 rotation matrices shared by the generator tables below.
_ROT_IDENTITY = [1, 0, 0, 0, 1, 0, 0, 0, 1]      # identity
_ROT_INVERSION = [-1, 0, 0, 0, -1, 0, 0, 0, -1]  # inversion centre
_ROT_2X = [1, 0, 0, 0, -1, 0, 0, 0, -1]          # two-fold rotation about a
_ROT_2Y = [-1, 0, 0, 0, 1, 0, 0, 0, -1]          # two-fold rotation about b
_ROT_2Z = [-1, 0, 0, 0, -1, 0, 0, 0, 1]          # two-fold rotation about c
_ROT_MY = [1, 0, 0, 0, -1, 0, 0, 0, 1]           # mirror perpendicular to b


def _register_space_group(number, symbol, operations):
    """Create a SpaceGroup from flat operation data and index it.

    Each operation is a (rotation, numerator, denominator) triple, where
    rotation is a flat list of 9 integers (reshaped to 3x3) and the
    numerator/denominator lists of 3 integers define the fractional
    translation vector.  The resulting SpaceGroup is stored in the
    module-level space_groups dictionary under both its number and its
    Hermann-Mauguin symbol.
    """
    transformations = []
    for rot_elements, trans_num, trans_den in operations:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(trans_num), N.array(trans_den)))
    sg = SpaceGroup(number, symbol, transformations)
    space_groups[number] = sg
    space_groups[symbol] = sg


_register_space_group(1, 'P 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(2, 'P -1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(3, 'P 1 2 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(4, 'P 1 21 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 1, 0], [1, 2, 1]),
])
_register_space_group(5, 'C 1 2 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 0], [1, 1, 1]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [1, 1, 0], [2, 2, 1]),
])
_register_space_group(6, 'P 1 m 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(7, 'P 1 c 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, 1], [1, 1, 2]),
])
_register_space_group(8, 'C 1 m 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, 0], [1, 1, 1]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_MY, [1, 1, 0], [2, 2, 1]),
])
_register_space_group(9, 'C 1 c 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, 1], [1, 1, 2]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_MY, [1, 1, 1], [2, 2, 2]),
])
_register_space_group(10, 'P 1 2/m 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 0], [1, 1, 1]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(11, 'P 1 21/m 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 1, 0], [1, 2, 1]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, -1, 0], [1, 2, 1]),
])
_register_space_group(12, 'C 1 2/m 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 0], [1, 1, 1]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, 0], [1, 1, 1]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [1, 1, 0], [2, 2, 1]),
    (_ROT_INVERSION, [1, 1, 0], [2, 2, 1]),
    (_ROT_MY, [1, 1, 0], [2, 2, 1]),
])
_register_space_group(13, 'P 1 2/c 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 1], [1, 1, 2]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, -1], [1, 1, 2]),
])
_register_space_group(14, 'P 1 21/c 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 1, 1], [1, 2, 2]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, -1, -1], [1, 2, 2]),
])
_register_space_group(15, 'C 1 2/c 1', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 1], [1, 1, 2]),
    (_ROT_INVERSION, [0, 0, 0], [1, 1, 1]),
    (_ROT_MY, [0, 0, -1], [1, 1, 2]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [1, 1, 1], [2, 2, 2]),
    (_ROT_INVERSION, [1, 1, 0], [2, 2, 1]),
    (_ROT_MY, [1, 1, -1], [2, 2, 2]),
])
_register_space_group(16, 'P 2 2 2', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2X, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Z, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(17, 'P 2 2 21', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2X, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 1], [1, 1, 2]),
    (_ROT_2Z, [0, 0, 1], [1, 1, 2]),
])
_register_space_group(18, 'P 21 21 2', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2X, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Z, [0, 0, 0], [1, 1, 1]),
])
_register_space_group(19, 'P 21 21 21', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2X, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [0, 1, 1], [1, 2, 2]),
    (_ROT_2Z, [1, 0, 1], [2, 1, 2]),
])
_register_space_group(20, 'C 2 2 21', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2X, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 1], [1, 1, 2]),
    (_ROT_2Z, [0, 0, 1], [1, 1, 2]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_2X, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [1, 1, 1], [2, 2, 2]),
    (_ROT_2Z, [1, 1, 1], [2, 2, 2]),
])
_register_space_group(21, 'C 2 2 2', [
    (_ROT_IDENTITY, [0, 0, 0], [1, 1, 1]),
    (_ROT_2X, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Y, [0, 0, 0], [1, 1, 1]),
    (_ROT_2Z, [0, 0, 0], [1, 1, 1]),
    (_ROT_IDENTITY, [1, 1, 0], [2, 2, 1]),
    (_ROT_2X, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Y, [1, 1, 0], [2, 2, 1]),
    (_ROT_2Z, [1, 1, 0], [2, 2, 1]),
])
# F 2 2 2: the four 222 rotations combined with each face-centring
# translation, translations in the outer loop to preserve the original
# transformation order.
_register_space_group(22, 'F 2 2 2', [
    (rot, num, den)
    for num, den in [([0, 0, 0], [1, 1, 1]), ([0, 1, 1], [1, 2, 2]),
                     ([1, 0, 1], [2, 1, 2]), ([1, 1, 0], [2, 2, 1])]
    for rot in (_ROT_IDENTITY, _ROT_2X, _ROT_2Y, _ROT_2Z)
])
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
# Space group 46 ('I m a 2'): build its symmetry operations as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [1,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
# Space group 47 ('P m m m'): all eight point-group rotations with zero
# translation (numerator [0,0,0], denominator [1,1,1]).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
# Space group 48 ('P n n n :2', origin choice 2): rotations paired with
# fractional translations given as numerator/denominator vectors.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
# Space group 49 ('P c c m').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
# Space group 50 ('P b a n :2', origin choice 2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
# Space group 51 ('P m m a').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
# Space group 52 ('P n n a').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
# Space group 53 ('P m n a').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
# Space group 54 ('P c c a').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
# Space group 55 ('P b a m').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
# Space group 56 ('P c c n').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
# Space group 57 ('P b c m').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
# Space group 58 ('P n n m').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
# Space group 59 ('P m m n :2', origin choice 2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
# Space group 60 ('P b c n').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
# Space group 61 ('P b c a').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
# Space group 62 ('P n m a').
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
# Space group 63 ('C m c m'): sixteen symmetry operations — the eight
# primitive ones followed by the same eight shifted by the C-centring
# translation (numerator [1,1,0], denominator [2,2,1]).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,-1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
# Space group 64 ('C m c a'): sixteen symmetry operations — eight primitive
# ones plus the same rotations with the C-centring shift applied.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,-1], [1,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
# Space group 65 ('C m m m'): sixteen operations — the eight rotations with
# zero translation, then the same eight with the C-centring shift
# (numerator [1,1,0], denominator [2,2,1]).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
# Start the operation list for the next space-group entry; the list is
# completed (and the group registered) by the statements that follow this
# chunk, so `transformations` must be left populated with exactly these
# thirteen triples, in order.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
# Space group 67 (C m m a).
# Each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator);
# the row-major 9-element list is reshaped in place to a 3x3 matrix.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
# Space group 68 (C c c a, origin choice 2).
# Each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator);
# the row-major 9-element list is reshaped in place to a 3x3 matrix.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
# Space group 69 (F m m m).
# The 32 operations factor as 8 point-group rotations repeated under
# 4 centering translations; the original flat listing is reproduced by
# iterating translations (outer) over rotations (inner), preserving order.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
point_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for num, den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for rot_elements in point_rotations:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
# Space group 70 (F d d d, origin choice 2).
# The 32 operations carry operation-specific diagonal-glide translations,
# so they are kept as an explicit flat table in the original order.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
# Space group 71 (I m m m).
# The 16 operations factor as 8 point-group rotations repeated under the
# identity and the body-centering translation; iterating translations
# (outer) over rotations (inner) reproduces the original listing order.
# Each operation is (rotation matrix, translation numerator, denominator).
transformations = []
point_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for num, den in [
    ([0,0,0], [1,1,1]),
    ([1,1,1], [2,2,2]),
]:
    for rot_elements in point_rotations:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
# Space group 72 (I b a m).
# Each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator);
# the row-major 9-element list is reshaped in place to a 3x3 matrix.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
# Space group 73 (I b c a).
# Each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator);
# the row-major 9-element list is reshaped in place to a 3x3 matrix.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
# Space group 74 (I m m a).
# Each symmetry operation is a triple
# (rotation matrix, translation numerator, translation denominator);
# the row-major 9-element list is reshaped in place to a 3x3 matrix.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
# Space group 75 (P 4): the four rotations about the c axis, all with
# zero translation. Each operation is stored as
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
# Space group 76 (P 41): fourfold screw axis along c.
# Each operation is stored as
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
# Space group 77 (P 42): fourfold screw axis along c.
# Each operation is stored as
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = | N.array([0,1,0,1,0,0,0,0,-1]) | numpy.array |
"""Physics functions for the dsigma pipeline."""
import numpy as np
from astropy import constants as c
from astropy import units as u
from scipy.special import jv, jn_zeros
from astropy.cosmology import FlatLambdaCDM
__all__ = ['mpc_per_degree', 'projection_angle', 'critical_surface_density',
'effective_critical_surface_density',
'lens_magnification_shear_bias']
# c^2 / (4 * pi * G) converted to Msun / pc, with units stripped via .value.
# NOTE(review): the name suggests this is the prefactor of the lensing
# critical surface density; confirm against the Sigma_crit functions listed
# in __all__ (their bodies are outside this chunk).
_sigma_crit_factor = (c.c**2 / (4 * np.pi * c.G)).to(u.Msun / u.pc).value
def mpc_per_degree(z, cosmology=FlatLambdaCDM(H0=100, Om0=0.3),
                   comoving=False):
    """Estimate the angular scale in Mpc/degree at a certain redshift.

    Parameters
    ----------
    z : float or numpy array
        Redshift of the object.
    cosmology : astropy.cosmology, optional
        Cosmology to assume for calculations. Default: FlatLambdaCDM with
        H0=100 and Om0=0.3.
    comoving : bool, optional
        Use comoving distance instead of physical distance when True.
        Default: False

    Returns
    -------
    float or numpy array
        Physical scale in units of Mpc/degree.
    """
    # The two branches differ only in the distance measure; the conversion
    # from Mpc per radian to Mpc per degree is common to both.
    if comoving:
        distance = cosmology.comoving_transverse_distance(z)
    else:
        distance = cosmology.angular_diameter_distance(z)
    return distance.to(u.Mpc).value * np.deg2rad(1)
def projection_angle(ra_l, dec_l, ra_s, dec_s):
r"""Calculate projection angle between lens and sources.
Parameters
----------
ra_l, dec_l : float or numpy array
Coordinates of the lens galaxies in degrees.
ra_s, dec_s : float or numpy array
Coordinates of the source galaxies in degrees.
Returns
-------
cos_2phi, sin_2phi : float or numpy array
The :math:`\cos` and :math:`\sin` of :math:`2 \phi`, where
:math:`\phi` is the angle measured from right ascension direction to a
line connecting the lens and source galaxies.
"""
# Convert everything into radians.
ra_l, dec_l = np.deg2rad(ra_l), np.deg2rad(dec_l)
ra_s, dec_s = np.deg2rad(ra_s), np.deg2rad(dec_s)
# Calculate the tan(phi).
mask = np.cos(dec_s) * np.sin(ra_s - ra_l) != 0
if hasattr(mask, "__len__"):
tan_phi = (
(np.cos(dec_l) * | np.sin(dec_s) | numpy.sin |
"""
A minimal character-level LSTM model. Written by ZhengHe (@ZhengHe-MD)
This is derived from the following scripts:
- https://gist.github.com/karpathy/d4dee566867f8291f086
- https://github.com/eliben/deep-learning-samples/blob/master/min-char-rnn/min-char-rnn.py
- https://github.com/nicodjimenez/lstm
And you might find the following materials helpful:
- http://karpathy.github.io/2015/05/21/rnn-effectiveness/
- https://nicodjimenez.github.io/2014/08/08/lstm.html
- http://arxiv.org/abs/1506.00019
- https://colah.github.io/posts/2015-08-Understanding-LSTMs/
- https://explained.ai/matrix-calculus/index.html
To run:
$ python min_char_rnn_lstm.py <text file>
----
BSD License
"""
import numpy as np
import sys
# Training corpus: first CLI argument, or input.txt in the working directory.
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    filename = 'input.txt'
with open(filename, 'r') as f:
    data = f.read()
# All unique characters / entities in the data set.
chars = list(set(data))
# Sort so the character -> index assignment is deterministic across runs.
chars.sort()
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
# Each character in the vocabulary gets a unique integer index assigned, in the
# half-open interval [0:N). These indices are useful to create one-hot encoded
# vectors that represent characters in numerical computations.
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
print('char_to_ix', char_to_ix)
print('ix_to_char', ix_to_char)
# Hyperparameters
hidden_size = 50  # size of hidden layer of neurons
seq_length = 16   # number of steps to unroll the RNN for
learning_rate = 1e-1
# Uniform initialization bounds for all weight matrices.
ub, lb = 0.1, -0.1
# LSTM gate weights, one (hidden_size, hidden_size + vocab_size) matrix per
# unrolled step, acting on the concatenation [x_t; h_{t-1}]:
# g = cell candidate, i = input gate, f = forget gate, o = output gate.
# BUG FIX: the original used np.random.randn (standard normal), so
# `randn * (ub - lb) + lb` produced N(lb, (ub - lb)^2) -- weights biased
# toward -0.1 instead of uniform on [lb, ub).  np.random.rand draws from
# the uniform [0, 1), which the affine transform maps onto [lb, ub).
Wgs = np.random.rand(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wis = np.random.rand(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wfs = np.random.rand(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
Wos = np.random.rand(seq_length, hidden_size, hidden_size + vocab_size) * (ub - lb) + lb
# Gate biases, zero-initialized.
bgs = np.zeros((seq_length, hidden_size, 1))
bis = np.zeros((seq_length, hidden_size, 1))
bfs = np.zeros((seq_length, hidden_size, 1))
bos = np.zeros((seq_length, hidden_size, 1))
# Fully-connected readout: hidden state -> vocabulary logits.
Why = np.random.rand(vocab_size, hidden_size) * (ub - lb) + lb
by = np.zeros((vocab_size, 1))
def lossFun(inputs, targets, hprev, sprev):
assert len(inputs) == seq_length
assert len(targets) == seq_length
xs, hs, ss, ps, ys = {}, {}, {}, {}, {}
gs, iis, fs, os = {}, {}, {}, {} # the `iis` here should be `is`, unfortunately `is` is a keyword in python
# Initial incoming state.
hs[-1] = np.copy(hprev)
ss[-1] = np.copy(sprev)
loss = 0
# Forward pass
for t in range(seq_length):
xs[t] = np.zeros((vocab_size, 1))
xs[t][inputs[t]] = 1
xc = np.vstack((xs[t], hs[t - 1]))
gs[t] = np.tanh(np.dot(Wgs[t], xc) + bgs[t])
iis[t] = sigmoid(np.dot(Wis[t], xc) + bis[t])
fs[t] = sigmoid(np.dot(Wfs[t], xc) + bfs[t])
os[t] = sigmoid( | np.dot(Wos[t], xc) | numpy.dot |
# Copyright (c) 2016 by <NAME> and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
"""
.. module:: interp
"""
from __future__ import print_function
import numpy as np
import scipy.linalg
import galsim
import warnings
from .interp import Interp
from .star import Star, StarFit
class BasisInterp(Interp):
    r"""An Interp class that works whenever the interpolating functions are
    linear sums of basis functions. Does things the "slow way" to be stable to
    degenerate fits to individual stars, instead of fitting to parameter sets
    produced by single stars.
    First time coding this we will assume that each element of the PSF parameter
    vector p is a linear combination of the same set of basis functions across the
    focal plane,
    p_i = \sum_{j} q_{ij} K_j(u,v,other stellar params).
    The property degenerate_points is set to True to indicate that this interpolator
    uses the alpha/beta quadratic form of chisq for each sample, rather than assuming
    that a best-fit parameter vector is available at every sample.
    Internally we'll store the interpolation coefficients in a 2d array of dimensions
    (nparams, nbases)
    Note: This is an abstract base class. The concrete class you probably want to use
    is BasisPolynomial.
    """
    def __init__(self):
        self.degenerate_points = True # This Interpolator uses chisq quadratic forms
        # Coefficient array of shape (nparams, nbases); set by initialize().
        self.q = None
    def initialize(self, stars, logger=None):
        """Initialize both the interpolator to some state prefatory to any solve iterations and
        initialize the stars for use with this interpolator.
        This class will initialize everything to have constant PSF parameter vector taken
        from the first Star in the list.
        :param stars:    A list of Star instances to use to initialize.
        :param logger:   A logger object for logging debug info. [default: None]
        :returns: A new list of Stars which have their parameters initialized.
        """
        # Seed q so that every basis position maps to the first star's params.
        c = stars[0].fit.params.copy()
        self.q = c[:,np.newaxis] * self.constant(1.)[np.newaxis,:]
        stars = self.interpolateList(stars)
        return stars
    def basis(self, star):
        """Return 1d array of polynomial basis values for this star
        :param star:   A Star instance
        :returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
        """
        raise NotImplementedError("Cannot call `basis` for abstract base class BasisInterp. "
                                  "You probably want to use BasisPolynomial.")
    def constant(self, value=1.):
        """Return 1d array of coefficients that represent a polynomial with constant value.
        :param value:  The value to use as the constant term.  [default: 1.]
        :returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
        """
        raise NotImplementedError("Cannot call `constant` for abstract base class BasisInterp. "
                                  "You probably want to use BasisPolynomial.")
    def solve(self, stars, logger=None):
        """Solve for the interpolation coefficients given some data.
        The StarFit element of each Star in the list is assumed to hold valid
        alpha and beta members specifying depending of chisq on differential
        changes to its parameter vector.
        :param stars:    A list of Star instances to interpolate between
        :param logger:   A logger object for logging debug info. [default: None]
        """
        logger = galsim.config.LoggerWrapper(logger)
        if self.q is None:
            raise RuntimeError("Attempt to solve() before initialize() of BasisInterp")
        # Empty A and B
        # A has shape (nparams, nbases, nparams, nbases): the quadratic form
        # accumulated over stars.  B matches the shape of q.
        A = np.zeros( self.q.shape+self.q.shape, dtype=float)
        B = np.zeros_like(self.q)
        for s in stars:
            # Get the basis function values at this star
            K = self.basis(s)
            # Sum contributions into A, B
            B += s.fit.beta[:,np.newaxis] * K
            tmp = s.fit.alpha[:,:,np.newaxis] * K
            A += K[np.newaxis,:,np.newaxis,np.newaxis] * tmp[:,np.newaxis,:,:]
        # Reshape to have single axis for all q's
        B = B.flatten()
        nq = B.shape[0]
        A = A.reshape(nq,nq)
        logger.debug('Beginning solution of matrix size %d',A.shape[0])
        # cf. comments in pixelgrid.py about this function in scipy 1.0.0
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            logger.info('A.shape = %s',A.shape)
            logger.info('B.shape = %s',B.shape)
            # assume_a='pos': A is a chisq quadratic form, hence symmetric
            # (and positive definite when the fit is non-degenerate).
            dq = scipy.linalg.solve(A, B, assume_a='pos', check_finite=False)
            if len(w) > 0:
                logger.warning('Caught %s',w[0].message)
                logger.debug('norm(A dq - B) = %s',scipy.linalg.norm(A.dot(dq) - B))
                logger.debug('norm(dq) = %s',scipy.linalg.norm(dq))
                # Disabled fallback path: SVD-based pseudo-inverse solve kept
                # for debugging degenerate systems.
                if False:
                    logger.warning('Switching to svd solution')
                    Sd,U = scipy.linalg.eigh(A)
                    nsvd = np.sum(np.abs(Sd) > 1.e-15 * np.abs(Sd[-1]))
                    logger.info('2-condition is %e',np.abs(Sd[-1]/Sd[0]))
                    logger.info('nsvd = %d of %d',nsvd,len(Sd))
                    # Note: unlike scipy.linalg.svd, the Sd here is in *ascending* order, not descending.
                    Sd[-nsvd:] = 1./Sd[-nsvd:]
                    Sd[:-nsvd] = 0.
                    S = np.diag(Sd)
                    dq = U.dot(S.dot(U.T.dot(B)))
                    logger.info('norm(A dq - B) = %s',scipy.linalg.norm(A.dot(dq) - B))
                    logger.info('norm(dq) = %s',scipy.linalg.norm(dq))
                    logger.info('norm(q) = %s',scipy.linalg.norm(self.q))
        logger.debug('...finished solution')
        # Apply the differential update to the stored coefficients.
        self.q += dq.reshape(self.q.shape)
    def interpolate(self, star, logger=None):
        """Perform the interpolation to find the interpolated parameter vector at some position.
        :param star:        A Star instance to which one wants to interpolate
        :param logger:      A logger object for logging debug info. [default: None]
        :returns: a new Star instance holding the interpolated parameters
        """
        if self.q is None:
            raise RuntimeError("Attempt to interpolate() before initialize() of BasisInterp")
        K = self.basis(star)
        # p_i = sum_j q_ij K_j
        p = np.dot(self.q,K)
        fit = star.fit.newParams(p)
        return Star(star.data, fit)
class BasisPolynomial(BasisInterp):
"""A version of the Polynomial interpolator that works with BasisModels and can use the
quadratic form of the chisq information it calculates. It works better than the regular
Polynomial interpolator when there is missing or degenerate information.
The order is the highest power of a key to be used. This can be the same for all keys
or you may provide a list of separate order values to be used for each key. (e.g. you
may want to use 2nd order in the positions, but only 1st order in the color).
All combinations of powers of keys that have total order <= max_order are used.
The maximum order is normally the maximum order of any given key's order, but you may
specify a larger value. (e.g. to use 1, x, y, xy, you would specify order=1, max_order=2.)
:param order: The order to use for each key. Can be a single value (applied to all
keys) or an array matching number of keys.
:param keys: List of keys for properties that will be used as the polynomial arguments.
[default: ('u','v')]
:param max_order: The maximum total order to use for cross terms between keys.
[default: None, which uses the maximum value of any individual key's order]
:param logger: A logger object for logging debug info. [default: None]
"""
def __init__(self, order, keys=('u','v'), max_order=None, logger=None):
super(BasisPolynomial, self).__init__()
self._keys = keys
if hasattr(order,'len'):
if not len(order)==len(keys):
raise ValueError('Number of provided orders does not match number of keys')
self._orders = order
else:
self._orders = (order,) * len(keys)
if max_order is None:
self._max_order = np.max(self._orders)
else:
self._max_order = max_order
if self._max_order<0 or np.any(np.array(self._orders) < 0):
# Exception if we have any requests for negative orders
raise ValueError('Negative polynomial order specified')
# TODO: Need to update the Interp write command to handle lists.
# Or write a custom BasisPolynomial.write function.
self.kwargs = {
'order' : order,
}
# Now build a mask that picks the desired polynomial products
# Start with 1d arrays giving orders in all dimensions
ord_ranges = [np.arange(order+1,dtype=int) for order in self._orders]
# Nifty trick to produce n-dim array holding total order
sumorder = np.sum(np.ix_(*ord_ranges))
self._mask = sumorder <= self._max_order
def getProperties(self, star):
return np.array([star.data[k] for k in self._keys], dtype=float)
def basis(self, star):
"""Return 1d array of polynomial basis values for this star
:param star: A Star instance
:returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
"""
# Get the interpolation key values
vals = self.getProperties(star)
# Make 1d arrays of all needed powers of keys
pows1d = []
for i,o in enumerate(self._orders):
p = np.ones(o+1,dtype=float)
p[1:] = vals[i]
pows1d.append(np.cumprod(p))
# Use trick to produce outer product of all these powers
pows2d = np.prod( | np.ix_(*pows1d) | numpy.ix_ |
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import glob
import re
import pprint
import pandas as pd
# Folder produced by the laughter detector over MELD training clips; each
# clip sub-folder contains a laugh_0.wav iff laughter was detected.
detected_folder = '../laughter-detection/detected_train_lauthter/' # checked folder if laughter is detected. it contains detected and also undeted folder
# laughter_file = 'dia991_utt7/'
# MELD training annotations: one row per (Dialogue_ID, Utterance_ID) with
# Utterance text, Sentiment and Emotion labels.
df = pd.read_csv('../MELD/data/MELD/train_sent_emo.csv', header=0)
# video_utterances = meld_features[5]
# emotion_labels = meld_features[2]#emotion labels:{'neutral': 0, 'surprise': 1, 'fear': 2, 'sadness': 3, 'joy': 4, 'disgust': 5, 'anger': 6}
# sentiment_labels = meld_features[8] #sentiment labels: {'neutral': 0, 'positive': 1, 'negative': 2}
sample = df[(df['Dialogue_ID'] == 0) & (df['Utterance_ID'] == 3)] # sample DialogueID && UtteranceID
sample_utterance = sample['Utterance'].values.tolist() # sample utterance
def functor(f, l):
    """Recursively apply ``f`` to every non-list element of a nested list.

    Used below to convert nested lists of index strings to ints.
    """
    if not isinstance(l, list):
        return f(l)
    return [functor(f, item) for item in l]
# Build the list of [dialogue_id, utterance_id] pairs for every clip in which
# the laughter detector fired (i.e. a laugh_0.wav exists in the clip folder).
laughter_file_path = glob.glob(detected_folder + '*/laugh_0.wav')
laughter_file = [os.path.basename(os.path.dirname(l)) for l in laughter_file_path]
laughter_index = [l.replace('dia', '').replace('utt', '').split('_', 1) for l in laughter_file]
laughter_index = functor(int, laughter_index)  # [dialogue_index, utterance_index]
laughter_index = sorted(laughter_index, key=lambda x: x[0])
# Sentiment tallies for the utterance containing the laugh ("current") and for
# the utterance immediately preceding it in the dialogue ("previous").
current_neutral = 0
current_positive = 0
current_negative = 0
previous_neutral = 0
previous_positive = 0
previous_negative = 0
# Emotion tallies; MELD label index mapping:
# {'neutral': 0, 'surprise': 1, 'fear': 2, 'sadness': 3, 'joy': 4, 'disgust': 5, 'anger': 6}
current_neu = 0
current_sur = 0
current_fea = 0
current_sad = 0
current_joy = 0
current_dis = 0
current_ang = 0
pre_neu = 0
pre_sur = 0
pre_fea = 0
pre_sad = 0
pre_joy = 0
pre_dis = 0
pre_ang = 0
# Detections with no matching row in the annotation CSV.
indexerror_sum = 0
for dia_index, utt_index in laughter_index:
    current_df = df[(df['Dialogue_ID'] == dia_index) & (df['Utterance_ID'] == utt_index)]
    current_utt = current_df['Utterance'].values.tolist()
    current_senti = current_df['Sentiment'].values.tolist()
    current_emo = current_df['Emotion'].values.tolist()
    try:
        # Tally sentiment of the laugh-bearing utterance.
        if current_senti == ['neutral']:
            current_neutral += 1
        elif current_senti == ['positive']:
            current_positive += 1
        elif current_senti == ['negative']:
            current_negative += 1
        # Tally emotion of the laugh-bearing utterance.
        if current_emo == ['neutral']:
            current_neu += 1
        elif current_emo == ['surprise']:
            current_sur += 1
        elif current_emo == ['fear']:
            current_fea += 1
        elif current_emo == ['sadness']:
            current_sad += 1
        elif current_emo == ['joy']:
            current_joy += 1
        elif current_emo == ['disgust']:
            current_dis += 1
        elif current_emo == ['anger']:
            current_ang += 1
        # The previous utterance only exists for utt_index > 0.
        if utt_index > 0:
            previous_df = df[(df['Dialogue_ID'] == dia_index) & (df['Utterance_ID'] == (utt_index - 1))]
            pre_utt = previous_df['Utterance'].values.tolist()
            pre_senti = previous_df['Sentiment'].values.tolist()
            pre_emo = previous_df['Emotion'].values.tolist()
            if pre_senti == ['neutral']:
                previous_neutral += 1
            elif pre_senti == ['positive']:
                previous_positive += 1
            elif pre_senti == ['negative']:
                previous_negative += 1
            if pre_emo == ['neutral']:
                pre_neu += 1
            elif pre_emo == ['surprise']:
                pre_sur += 1
            elif pre_emo == ['fear']:
                pre_fea += 1
            elif pre_emo == ['sadness']:
                pre_sad += 1
            elif pre_emo == ['joy']:
                pre_joy += 1
            elif pre_emo == ['disgust']:
                pre_dis += 1
            elif pre_emo == ['anger']:
                pre_ang += 1
    except IndexError:
        # BUG FIX: the original attached two identical `except IndexError`
        # clauses to this try; the second one (which printed the message) was
        # unreachable dead code.  Merged into a single handler and corrected
        # the message typo ('IndexEroor').
        indexerror_sum += 1
        print('***IndexError***')
def whole_senti_graph():
    """Pie chart of the sentiment distribution over all training utterances.

    Counts are hard-coded totals from the MELD training split.
    """
    counts = {'neutral': 4710, 'positive': 2334, 'negative': 2945}
    names = np.array(list(counts.keys()))
    values = np.array(list(counts.values()))
    plt.title('Sentiment in Whole Utterance')
    plt.pie(values, labels=names, counterclock=False, startangle=90, autopct="%1.1f%%")
    plt.show()
def whole_emo_graph():
    """Pie chart of the emotion distribution over all training utterances.

    Counts are hard-coded totals from the MELD training split.
    """
    counts = {'neutral': 4710, 'surprise': 1205, 'fear': 268, 'sadness': 683,
              'joy': 1743, 'disgust': 271, 'anger': 1109}
    names = np.array(list(counts.keys()))
    values = np.array(list(counts.values()))
    plt.title('Emotion in Whole Utterances')
    plt.pie(values, labels=names, counterclock=False, startangle=90, autopct="%1.1f%%")
    plt.show()
def current_senti_graph():
    """Print and plot sentiment counts of utterances that contain a laugh."""
    print(f"current_neutral:{current_neutral} \ncurrent_positive:{current_positive}, \ncurrent_negative:{current_negative}")
    print(f"IndexError_SUM:{indexerror_sum}")
    names = np.array(['current_neutral', 'current_positive', 'current_negative'])
    values = np.array([current_neutral, current_positive, current_negative])
    plt.title('Current_Sentiment')
    plt.pie(values, labels=names, counterclock=False, startangle=90, autopct="%1.1f%%")
    plt.show()
def previous_senti_graph():
    """Print and plot sentiment counts of utterances preceding a laugh."""
    # NOTE: the 'negarive' typo is preserved -- it is part of the emitted output.
    print(f"previous_neutral:{previous_neutral} \n previous_positive:{previous_positive}, \n previous_negarive:{previous_negative}")
    names = np.array(['previous_neutral', 'previous_positive', 'previous_negative'])
    values = np.array([previous_neutral, previous_positive, previous_negative])
    plt.title('Previous_Sentiment')
    plt.pie(values, labels=names, counterclock=False, startangle=90, autopct="%1.1f%%")
    plt.show()
# emotion
# label index mapping = {'neutral': 0, 'surprise': 1, 'fear': 2, 'sadness': 3, 'joy': 4, 'disgust': 5, 'anger': 6}
def current_emo_graph():
    """Print and plot emotion counts of utterances that contain a laugh."""
    print('current_neutral:{}, current_surprise:{}, current_fear:{}, current_sadness:{}, current_joy:{}, current_disgust:{}, current_anger:{}'.format(current_neu, current_sur, current_fea, current_sad, current_joy, current_dis, current_ang))
    emo_names = ['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger']
    emo_counts = [current_neu, current_sur, current_fea, current_sad, current_joy, current_dis, current_ang]
    plt.title('Current_Emotion')
    plt.pie(np.array(emo_counts), labels=np.array(emo_names), counterclock=False, startangle=90, autopct="%1.1f%%")
    plt.show()
def previous_emo_graph():
print('pre_neutral:{}, pre_surprise:{}, pre_fear:{}, pre_sadness:{}, pre_joy:{}, pre_disgust:{}, pre_anger:{}'.format(pre_neu, pre_sur, pre_fea, pre_sad, pre_joy, pre_dis, pre_ang))
X = | np.array(['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger']) | numpy.array |
import numpy as np
from kldmwr import distributions
###############################################################################
# Derivatives of h = (1 + xi (x - mu) / sigma)^{-1 / xi}
###############################################################################
def calc_h(x, p):
    """GEV auxiliary function h = (1 + xi*(x - mu)/sigma)^(-1/xi).

    ``p`` is the parameter triple (mu, sigma, xi).
    """
    mu, sg, xi = p[0], p[1], p[2]
    return (1 + xi * (x - mu) / sg) ** (- 1 / xi)
def calc_h_mu(x, p):
    """Partial derivative of h with respect to mu."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    return 1 / sg * phi ** (- 1 / xi - 1)
def calc_h_sg(x, p):
    """Partial derivative of h with respect to sigma."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    return (x - mu) / sg ** 2 * phi ** (-1 / xi - 1)
def calc_h_xi(x, p):
    """Partial derivative of h with respect to xi."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    log_term = 1 / xi ** 2 * phi ** (-1 / xi) * np.log(phi)
    power_term = (x - mu) / sg / xi * phi ** (-1 / xi - 1)
    return log_term - power_term
def calc_h_mumu(x, p):
    """Second derivative d2h/dmu2."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    return (1 + xi) / sg ** 2 * phi ** (-1 / xi - 2)
def calc_h_sgsg(x, p):
    """Second derivative d2h/dsigma2."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    first = - 2 * (x - mu) / sg ** 3 * phi ** (-1 / xi - 1)
    second = (1 + xi) * (x - mu) ** 2 / sg ** 4 * phi ** (-1 / xi - 2)
    return first + second
def calc_h_xixi(x, p):
    """Second derivative d2h/dxi2 (five-term expansion)."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    log_phi = np.log(phi)
    t1 = - 2 / xi ** 3 * phi ** (-1 / xi) * log_phi
    t2 = 1 / xi ** 4 * phi ** (-1 / xi) * log_phi * log_phi
    t3 = - 2 * (x - mu) / sg / xi ** 3 * phi ** (-1 / xi - 1) * log_phi
    t4 = 2 * (x - mu) / sg / xi ** 2 * phi ** (-1 / xi - 1)
    t5 = ((x - mu) / sg) ** 2 * (1 + xi) / xi ** 2 * phi ** (-1 / xi - 2)
    return t1 + t2 + t3 + t4 + t5
def calc_h_musg(x, p):
    """Mixed second derivative d2h/(dmu dsigma)."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    first = - 1 / sg ** 2 * phi ** (-1 / xi - 1)
    second = (x - mu) / sg * (1 + xi) / sg ** 2 * phi ** (-1 / xi - 2)
    return first + second
def calc_h_muxi(x, p):
    """Mixed second derivative d2h/(dmu dxi)."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    first = 1 / sg / xi ** 2 * phi ** (-1 / xi - 1) * np.log(phi)
    second = - ((x - mu) / sg) * (1 + xi) / sg / xi * phi ** (-1 / xi - 2)
    return first + second
def calc_h_sgxi(x, p):
    """Mixed second derivative d2h/(dsigma dxi)."""
    mu, sg, xi = p[0], p[1], p[2]
    phi = 1 + xi * (x - mu) / sg
    first = (x - mu) / sg ** 2 / xi ** 2 * phi ** (-1 / xi - 1) * np.log(phi)
    second = - ((x - mu) / sg) ** 2 * (1 + xi) / sg / xi * phi ** (-1 / xi - 2)
    return first + second
###############################################################################
# Derivatives of G
###############################################################################
def calc_gg(x, p):
    """GEV cdf G(x; p), delegated to distributions.gev_cdf."""
    return distributions.gev_cdf(x, p)
def calc_gg_mu(x, p):
    """First derivative dG/dmu = -G * h_mu."""
    return -(calc_gg(x, p) * calc_h_mu(x, p))
def calc_gg_sg(x, p):
    """First derivative dG/dsigma = -G * h_sg."""
    return -(calc_gg(x, p) * calc_h_sg(x, p))
def calc_gg_xi(x, p):
    """First derivative dG/dxi = -G * h_xi."""
    return -(calc_gg(x, p) * calc_h_xi(x, p))
def _gg_second(x, p, h_kl, h_k, h_l):
    """Shared form G * (h_k * h_l - h_kl) for the second derivatives of G."""
    return calc_gg(x, p) * (- h_kl(x, p) + h_k(x, p) * h_l(x, p))
def calc_gg_mumu(x, p):
    """Second derivative d2G/dmu2."""
    return _gg_second(x, p, calc_h_mumu, calc_h_mu, calc_h_mu)
def calc_gg_sgsg(x, p):
    """Second derivative d2G/dsigma2."""
    return _gg_second(x, p, calc_h_sgsg, calc_h_sg, calc_h_sg)
def calc_gg_xixi(x, p):
    """Second derivative d2G/dxi2."""
    return _gg_second(x, p, calc_h_xixi, calc_h_xi, calc_h_xi)
def calc_gg_musg(x, p):
    """Mixed second derivative d2G/(dmu dsigma)."""
    return _gg_second(x, p, calc_h_musg, calc_h_mu, calc_h_sg)
def calc_gg_muxi(x, p):
    """Mixed second derivative d2G/(dmu dxi)."""
    return _gg_second(x, p, calc_h_muxi, calc_h_mu, calc_h_xi)
def calc_gg_sgxi(x, p):
    """Mixed second derivative d2G/(dsigma dxi)."""
    return _gg_second(x, p, calc_h_sgxi, calc_h_sg, calc_h_xi)
###############################################################################
# D_{k,i} / D_i,
# D_{Kl, i} / D_i
# (D_{k, i} / D_i)(D_{l, i} / D_i)
# l_{kl, i}
###############################################################################
def calc_eff(xv, p):
    """Cdf values at the sample points, padded with the boundary values 0 and 1."""
    return np.append([0], np.append(calc_gg(xv, p), [1]))
def calc_eff_p(xv, p):
    """Stack the zero-padded first-derivative rows into a (3, n+2) array."""
    rows = [np.append([0], np.append(deriv(xv, p), [0]))
            for deriv in (calc_gg_mu, calc_gg_sg, calc_gg_xi)]
    return np.vstack(rows)
def calc_gg_pp(x, p):
    """3x3 symmetric matrix of second derivatives of G at x."""
    row_mu = [calc_gg_mumu(x, p), calc_gg_musg(x, p), calc_gg_muxi(x, p)]
    row_sg = [calc_gg_musg(x, p), calc_gg_sgsg(x, p), calc_gg_sgxi(x, p)]
    row_xi = [calc_gg_muxi(x, p), calc_gg_sgxi(x, p), calc_gg_xixi(x, p)]
    return np.array([row_mu, row_sg, row_xi])
def calc_eff_pp(xv, p):
    """Second-derivative tensor zero-padded on both ends of the last axis."""
    pad = np.zeros((3, 3, 1))
    core = calc_gg_pp(xv, p)
    return np.append(pad, np.append(core, pad, axis=-1), axis=-1)
def calc_dd_p_o_dd(xv, p):
"""Returns D_p,i / D_i
res[k, i] = D_{k, i} / D_i
"""
eff_p = calc_eff_p(xv, p)
dgg_p = np.diff(eff_p, axis=-1)
dff = np.diff(calc_eff(xv, p))
dff_arr = np.concatenate([(dff, dff, dff)], axis=0)
return | np.divide(dgg_p, dff_arr) | numpy.divide |
#!/usr/bin/env python
# coding: utf-8
import enum
try:
import fitz
except:
pass
import os
import json
import glob
import sys
import local_config
sys.path.append(local_config.global_3rd_party)
from os.path import join
from src.ovotools.ovotools.params import AttrDict
import numpy as np
from collections import OrderedDict
import torch
import timeit
import copy
from pathlib import Path
import PIL.ImageDraw
import PIL.ImageFont
from pathlib import Path
import zipfile
import data_utils.data as data
import braille_utils.letters as letters
import braille_utils.label_tools as lt
from model import create_model_retinanet
import pytorch_retinanet
import pytorch_retinanet.encoder
import braille_utils.postprocess as postprocess
# Width (px) to which input images are rescaled before inference.
inference_width = 1024
model_weights = 'model.t7'
params_fn = join(local_config.data_path, 'weights', 'param.txt')
model_weights_fn = join(local_config.data_path, 'weights', model_weights)
device = 'cuda:0'
#device = 'cpu'
# Detection score threshold and NMS threshold used when decoding boxes.
cls_thresh = 0.3
nms_thresh = 0.02
# Coefficients (in units of the symbol height h) for an empirical correction
# of the resulting box sizes, fixing result inaccuracies for subsequent
# annotation.  (Translated from the original Russian comment.)
REFINE_COEFFS = [0.083, 0.092, -0.083, -0.013]
class OrientationAttempts(enum.IntEnum):
    # Indices into the 8-slot input/prediction lists of
    # BraileInferenceImpl.forward: four page rotations, each with an INV_*
    # variant built by negating and mirroring the tensor (presumably for
    # reading the reverse side of a two-sided page -- TODO confirm).
    NONE = 0
    ROT180 = 1
    INV = 2
    INV_ROT180 = 3
    ROT90 = 4
    ROT270 = 5
    INV_ROT90 = 6
    INV_ROT270 = 7
class BraileInferenceImpl(torch.nn.Module):
    # Core inference module: wraps the RetinaNet braille detector plus box
    # decoding and the page-orientation / two-sided heuristics.  The
    # `# type:` comments below are kept as-is -- they are torch.jit type
    # annotations used when the module is scripted.
    def __init__(self, params, model, device, label_is_valid, verbose=1):
        """:param model: either a ready torch.nn.Module or a path to a weights file."""
        super(BraileInferenceImpl, self).__init__()
        self.verbose = verbose
        self.device = device
        if isinstance(model, torch.nn.Module):
            # An already-built model was passed in directly.
            self.model_weights_fn = ""
            self.model = model
        else:
            # `model` is a weights-file path: construct the net and load it.
            self.model_weights_fn = model
            self.model, _, _ = create_model_retinanet.create_model_retinanet(params, device=device)
            self.model = self.model.to(device)
            self.model.load_state_dict(torch.load(self.model_weights_fn, map_location = 'cpu'))
            self.model.eval()
        #self.model = torch.jit.script(self.model)
        self.encoder = pytorch_retinanet.encoder.DataEncoder(**params.model_params.encoder_params)
        #self.encoder = encoder
        # 1 for labels that are valid braille codes, 0 otherwise.
        self.valid_mask = torch.tensor(label_is_valid).long()
        self.cls_thresh = cls_thresh
        self.nms_thresh = nms_thresh
        self.num_classes = [] if not params.data.get('class_as_6pt', False) else [1]*6
    def calc_letter_statistics(self, cls_preds, cls_thresh, orientation_attempts):
        # type: (List[Tensor], float)->Tuple[int, Tuple[Tensor, Tensor, Tensor]]
        # Score each attempted orientation by comparing the mass of valid vs.
        # invalid braille labels among confident detections; pick the best.
        device = cls_preds[min(orientation_attempts)].device
        stats = [torch.zeros((1, 64,), device=device)]*8
        for i, cls_pred in enumerate(cls_preds):
            if i in orientation_attempts:
                scores = cls_pred.sigmoid()
                scores[scores<cls_thresh] = torch.tensor(0.).to(scores.device)
                stat = scores.sum(1)
                assert list(stat.shape) == [1, 64]
                stats[i] = stat
        stat = torch.cat(stats, dim=0)
        valid_mask = self.valid_mask.to(stat.device)
        sum_valid = (stat*valid_mask).sum(1)
        sum_invalid = (stat*(1-valid_mask)).sum(1)
        err_score = (sum_invalid+1)/(sum_valid+1)
        best_idx = torch.argmin(err_score/(sum_valid+1)) # heuristic; admittedly ad hoc
        return best_idx.item(), (err_score, sum_valid, sum_invalid)
    def forward(self, input_tensor, input_tensor_rotated, find_orientation, process_2_sides):
        # type: (Tensor, Tensor, int)->Tuple[Tensor,Tensor,Tensor,int, Tuple[Tensor, Tensor, Tensor]]
        t = timeit.default_timer()
        # Which of the 8 orientation slots (see OrientationAttempts) to try.
        orientation_attempts = [OrientationAttempts.NONE]
        if find_orientation:
            orientation_attempts += [OrientationAttempts.ROT180, OrientationAttempts.ROT90, OrientationAttempts.ROT270]
        if process_2_sides:
            orientation_attempts += [OrientationAttempts.INV]
            if find_orientation:
                orientation_attempts += [OrientationAttempts.INV_ROT180, OrientationAttempts.INV_ROT90, OrientationAttempts.INV_ROT270]
        if len(self.num_classes) > 1:
            assert not find_orientation and not process_2_sides
        # Build the input batch for every attempted orientation.  INV_*
        # variants negate the tensor and mirror it along the last axis.
        input_data = [None]*8
        input_data[OrientationAttempts.NONE]= input_tensor.unsqueeze(0)
        if find_orientation:
            input_data[OrientationAttempts.ROT180] = torch.flip(input_data[OrientationAttempts.NONE], [2,3])
            input_data[OrientationAttempts.ROT90] = input_tensor_rotated.unsqueeze(0)
            input_data[OrientationAttempts.ROT270] = torch.flip(input_data[OrientationAttempts.ROT90], [2, 3])
        if process_2_sides:
            input_data[OrientationAttempts.INV] = torch.flip(-input_data[OrientationAttempts.NONE], [3])
            if find_orientation:
                input_data[OrientationAttempts.INV_ROT180] = torch.flip(-input_data[OrientationAttempts.ROT180], [3])
                input_data[OrientationAttempts.INV_ROT90] = torch.flip(-input_data[OrientationAttempts.ROT90], [3])
                input_data[OrientationAttempts.INV_ROT270] = torch.flip(-input_data[OrientationAttempts.ROT270], [3])
        loc_preds: List[Tensor] = [torch.tensor(0)]*8
        cls_preds: List[Tensor] = [torch.tensor(0)]*8
        if self.verbose >= 2:
            print("  forward.prepare", timeit.default_timer() - t)
            t = timeit.default_timer()
        # Run the detector once per attempted orientation.
        for i, input_data_i in enumerate(input_data):
            if i in orientation_attempts:
                loc_pred, cls_pred = self.model(input_data_i)
                loc_preds[i] = loc_pred
                cls_preds[i] = cls_pred
        if self.verbose >= 2:
            print("  forward.model", timeit.default_timer() - t)
            t = timeit.default_timer()
        if find_orientation:
            best_idx, err_score = self.calc_letter_statistics(cls_preds, self.cls_thresh, orientation_attempts)
        else:
            best_idx, err_score = OrientationAttempts.NONE, (torch.tensor([0.]),torch.tensor([0.]),torch.tensor([0.]))
        if self.verbose >= 2:
            torch.cuda.synchronize(self.device)
        # Map an inverted-side winner back to its front-side slot
        # (INV_x = x + 2 in the OrientationAttempts enumeration).
        if best_idx in [OrientationAttempts.INV, OrientationAttempts.INV_ROT180, OrientationAttempts.INV_ROT90, OrientationAttempts.INV_ROT270]:
            best_idx -= 2
        if self.verbose >= 2:
            print("  forward.calc_letter_statistics", timeit.default_timer() - t)
            t = timeit.default_timer()
        # Decode boxes/labels/scores for the winning orientation.
        h,w = input_data[best_idx].shape[2:]
        boxes, labels, scores = self.encoder.decode(loc_preds[best_idx][0].cpu().data,
                                                    cls_preds[best_idx][0].cpu().data, (w,h),
                                                    cls_thresh = self.cls_thresh, nms_thresh = self.nms_thresh,
                                                    num_classes=self.num_classes)
        if len(self.num_classes) > 1:
            labels = torch.tensor([lt.label010_to_int([str(s.item()+1) for s in lbl101]) for lbl101 in labels])
        if process_2_sides:
            # Decode the reverse side (slot best_idx + 2) as well.
            boxes2, labels2, scores2 = self.encoder.decode(loc_preds[best_idx+2][0].cpu().data,
                                                           cls_preds[best_idx+2][0].cpu().data, (w, h),
                                                           cls_thresh=self.cls_thresh, nms_thresh=self.nms_thresh,
                                                           num_classes=self.num_classes)
        else:
            boxes2, labels2, scores2 = None, None, None
        if self.verbose >= 2:
            print("  forward.decode", timeit.default_timer() - t)
            t = timeit.default_timer()
        return boxes, labels, scores, best_idx, err_score, boxes2, labels2, scores2
class BrailleInference:
DRAW_NONE = 0
DRAW_ORIGINAL = 1
DRAW_REFINED = 2
DRAW_BOTH = DRAW_ORIGINAL | DRAW_REFINED # 3
DRAW_FULL_CHARS = 4
    def __init__(self, params_fn=params_fn, model_weights_fn=model_weights_fn, create_script = None,
                 verbose=1, inference_width=inference_width, device=device):
        """Build the high-level inference wrapper.

        :param params_fn: path to the saved AttrDict parameter file.
        :param model_weights_fn: weights-file path, or a ready torch.nn.Module.
        :param create_script: None -> build impl and torch.jit.script it;
            True/other truthy -> build impl without forcing scripting;
            False -> load a previously saved scripted model (.pth).
        """
        self.verbose = verbose
        if not torch.cuda.is_available() and device != 'cpu':
            print('CUDA not availabel. CPU is used')
            device = 'cpu'
        params = AttrDict.load(params_fn, verbose=verbose)
        # Override training-time data settings for single-image inference.
        params.data.net_hw = (inference_width,inference_width,) #(512,768) ###### (1024,1536) #
        params.data.batch_size = 1 #######
        # Disable augmentation: fixed width, no stretch, no rotation.
        params.augmentation = AttrDict(
            img_width_range=(inference_width, inference_width),
            stretch_limit = 0.0,
            rotate_limit=0,
        )
        self.preprocessor = data.ImagePreprocessor(params, mode = 'inference')
        if isinstance(model_weights_fn, torch.nn.Module):
            self.impl = BraileInferenceImpl(params, model_weights_fn, device, lt.label_is_valid, verbose=verbose)
        else:
            model_script_fn = model_weights_fn + '.pth'
            if create_script != False:
                self.impl = BraileInferenceImpl(params, model_weights_fn, device, lt.label_is_valid, verbose=verbose)
                if create_script is not None:
                    self.impl = torch.jit.script(self.impl)
                if isinstance(self.impl, torch.jit.ScriptModule):
                    torch.jit.save(self.impl, model_script_fn)
                    if verbose >= 1:
                        print("Model loaded and saved to " + model_script_fn)
                else:
                    if verbose >= 1:
                        print("Model loaded")
            else:
                # create_script is False: load the previously scripted model.
                self.impl = torch.jit.load(model_script_fn)
                if verbose >= 1:
                    print("Model pth loaded")
        self.impl.to(device)
def load_pdf(self, img_fn):
try:
pdf_file = fitz.open(img_fn)
pg = pdf_file.loadPage(0)
pdf = pg.getPixmap()
cspace = pdf.colorspace
if cspace is None:
mode = "L"
elif cspace.n == 1:
mode = "L" if pdf.alpha == 0 else "LA"
elif cspace.n == 3:
mode = "RGB" if pdf.alpha == 0 else "RGBA"
else:
mode = "CMYK"
img = PIL.Image.frombytes(mode, (pdf.width, pdf.height), pdf.samples)
return img
except Exception as exc:
return None
def run(self, img, lang, draw_refined, find_orientation, process_2_sides, align_results, repeat_on_aligned=True, gt_rects=[]):
"""
:param img: can be 1) PIL.Image 2) filename to image (.jpg etc.) or .pdf file
"""
if gt_rects:
assert find_orientation == False, "gt_rects можно передавать только если ориентация задана"
t = timeit.default_timer()
if not isinstance(img, PIL.Image.Image):
try:
if Path(img).suffix=='.pdf':
img = self.load_pdf(img)
else:
img = PIL.Image.open(img)
except Exception as e:
return None
if self.verbose >= 2:
print("run.reading image", timeit.default_timer() - t)
# img.save(Path(results_dir) / 'original.jpg')
# img.save(Path(results_dir) / 'original_100.jpg', quality=100)
t = timeit.default_timer()
if repeat_on_aligned and not process_2_sides:
results_dict0 = self.run_impl(img, lang, draw_refined, find_orientation,
process_2_sides=False, align=True, draw=False, gt_rects=gt_rects)
if self.verbose >= 2:
print("run.run_impl_1", timeit.default_timer() - t)
# results_dict0['image'].save(Path(results_dir) / 're1.jpg')
# results_dict0['image'].save(Path(results_dir) / 're1_100.jpg', quality=100)
t = timeit.default_timer()
results_dict = self.run_impl(results_dict0['image'], lang, draw_refined, find_orientation=False,
process_2_sides=process_2_sides, align=False, draw=True,
gt_rects=results_dict0['gt_rects'])
results_dict['best_idx'] = results_dict0['best_idx']
results_dict['err_scores'] = results_dict0['err_scores']
results_dict['homography'] = results_dict0['homography']
else:
results_dict = self.run_impl(img, lang, draw_refined, find_orientation,
process_2_sides=process_2_sides, align=align_results, draw=True, gt_rects=gt_rects)
if self.verbose >= 2:
# results_dict['image'].save(Path(results_dir) / 're2.jpg')
# results_dict['image'].save(Path(results_dir) / 're2_100.jpg', quality=100)
print("run.run_impl", timeit.default_timer() - t)
return results_dict
# def refine_boxes(self, boxes):
# """
# GVNC. Эмпирическая коррекция получившихся размеров чтобы исправить неточность результатов для последующей разметки
# :param boxes:
# :return:
# """
# h = boxes[:, 3:4] - boxes[:, 1:2]
# coefs = torch.tensor([REFINE_COEFFS])
# deltas = h * coefs
# return boxes + deltas
def refine_lines(self, lines):
"""
GVNC. Эмпирическая коррекция получившихся размеров чтобы исправить неточность результатов для последующей разметки
:param boxes:
:return:
"""
for ln in lines:
for ch in ln.chars:
h = ch.refined_box[3] - ch.refined_box[1]
coefs = np.array(REFINE_COEFFS)
deltas = h * coefs
ch.refined_box = ( | np.array(ch.refined_box) | numpy.array |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
    """Request a NetCDF export from the OOI M2M API and poll until it is ready.

    :param uframe_dataset_name: sensor inventory path of the dataset to request
    :param start_date: beginDT value (ISO timestamp string)
    :param end_date: endDT value (ISO timestamp string)
    :return: JSON response dict from the request, or None if the request failed.
        Note: also returns the dict if polling times out (~20 minutes).
    """
    options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
    r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
    if r.status_code == requests.codes.ok:
        data = r.json()
    else:
        return None
    # wait until the request is completed
    print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
    url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
    check_complete = url + '/status.txt'
    start_time = time.time()
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                # Bug fix: 'elapsed' was only assigned in the failure branch, so a
                # first-poll success raised NameError (and later successes printed
                # the previous iteration's value). Measure wall time instead.
                elapsed = (time.time() - start_time) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
    return data
def M2M_Files(data, tag=''):
    """Collect the NetCDF file listing for a completed M2M data request.

    :param data: JSON object returned from the M2M data request with details on
        where the data is to be found for download (the 'allURLs' entries)
    :param tag: regex tag to use in discriminating the data files, so we only
        collect the correct ones
    :return: list of matching files from the THREDDS catalog
    """
    # The first 'allURLs' entry that points at the THREDDS catalog.
    thredds_urls = [u for u in data['allURLs'] if re.match(r'.*thredds.*', u)]
    return list_files(thredds_urls[0], tag)
def list_files(url, tag=''):
    """List the NetCDF data files in the THREDDS catalog of an M2M request.

    :param url: URL to user's THREDDS catalog specific to a data request
    :param tag: regex pattern used to distinguish files of interest
    :return: list of href values for catalog entries matching the pattern
    """
    pattern = re.compile(tag)
    html = requests.get(url).text
    catalog = BeautifulSoup(html, 'html.parser')
    hrefs = []
    for anchor in catalog.find_all('a', text=pattern):
        hrefs.append(anchor.get('href'))
    return hrefs
def M2M_Data(nclist, variables):
    """Download the requested variables from each THREDDS/OPeNDAP NetCDF file.

    :param nclist: list of THREDDS catalog file paths returned by the request
    :param variables: structtype of var objects; each ``.name`` selects the
        NetCDF variable to read, and its ``.data`` array is appended to in place
    :return: (variables, time_converted) where time_converted is a pandas
        DatetimeIndex built from variables[0] (seconds since 1900-01-01)
    """
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may contain more than one URL; data from all files is concatenated.
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # NOTE(review): strips a fixed-length catalog prefix -- verify against list_files output
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        try:
            for ii in range(len(variables)):
                dum = openFile.variables[variables[ii].name]
                variables[ii].data = np.append(variables[ii].data, dum[:].data)
        finally:
            openFile.close()  # bug fix: dataset handles were opened but never closed
    # Convert seconds-since-1900 to days, then to pandas timestamps.
    tmp = variables[0].data/60/60/24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
class var(object):
    """Generic holder for a named data series with units.

    Attributes: ``name`` (NetCDF variable name), ``data`` (accumulated numpy
    samples), ``units`` (unit string).
    """

    def __init__(self):
        self.name = ''
        self.data = np.array([])
        self.units = ''

    def __repr__(self):
        lines = [
            "name: " + self.name,
            "units: " + self.units,
            "data: size: " + str(self.data.shape),
        ]
        return '\n'.join(lines)
class structtype(object):
    """A class that imitates a Matlab structure type.

    Indexing past the current end auto-creates fresh ``var`` entries, so
    fields can be filled in sequentially (``s[0].name = ...``) or sparsely.
    """

    def __init__(self):
        # Backing list of var() entries.
        self._data = []

    def __getitem__(self, index):
        """Return the var at *index*, growing the struct as needed.

        Generalizes the original, which only grew when index == len exactly
        (sparse access such as s[2] on an empty struct raised IndexError).
        Negative indices behave as in a plain list.
        """
        while index >= len(self._data):
            self._data.append(var())
        return self._data[index]

    def __len__(self):
        return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
# ADCP -- acoustic Doppler current profiler (adcp_velocity_earth streams: bin depths, attitude, E/N/up velocities)
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
# ZPLSC -- bio-acoustic sonar (zplsc_c_instrument streams; time only is extracted here)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# WAVSS -- surface wave spectra statistics (wavss_a_dcl_statistics streams: wave heights, periods, direction, spread)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
# VELPT -- single-point velocity meter (velpt_ab_dcl_instrument streams: E/N/up velocity, attitude, temperature, pressure)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
# PRESF: seafloor pressure (tide) recorders -- absolute seafloor pressure (dbar) and seawater temperature (degC)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
# CTDBP: moored CTDs -- temperature, practical salinity, density, pressure, and conductivity
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
# VEL3D: 3-D point velocity meters -- eastward/northward/upward turbulent velocity (m/s) plus seawater pressure (reported in 0.001 dbar, i.e. mbar)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
# PCO2A: air-sea pCO2 -- partial pressure of CO2 in surface seawater and atmosphere (uatm) and derived CO2 flux (mol m-2 s-1)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
# OPTAA: optical absorption/attenuation meters -- only the time variable is populated for these streams here
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# NUTNR: nitrate sensors (SUNA) -- raw and salinity-corrected nitrate concentration (umol/L)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
# PHSEN (seawater pH sensor) -- recovered-instrument streams. First the
# NSIF (near-surface instrument frame, RID16/RID26) mounts, then the MFN
# (seafloor, MFD35) mounts. All branches request the same three variables:
# time, thermistor temperature (degC), and pH (unitless).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
# PHSEN on the seafloor (MFN / MFD35) frames.
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'phsen_abcdef_ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
# PCO2W (water-side pCO2 sensor) -- recovered-instrument streams on NSIF and
# MFN frames. Variables: time, thermistor temperature (degC), and seawater
# pCO2 (uatm).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
# PARAD (photosynthetically active radiation) on the CE09OSPM wire-following
# profiler, recovered-WFP stream. Includes the profiler CTD pressure so PAR
# can be referenced to depth. Note the double underscore in
# 'parad_k__stc_imodem_instrument_recovered' is part of the stream name.
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
    uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'parad_k_par'
    var_list[2].name = 'int_ctd_pressure'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol photons m-2 s-1'
    var_list[2].units = 'dbar'
# NUTNR (nitrate, SUNA) -- recovered-instrument streams on the NSIF frames.
# Variables: time, raw nitrate concentration, and the salinity-corrected
# nitrate product (both umol/L).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'nitrate_concentration'
    var_list[2].name = 'salinity_corrected_nitrate'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'nitrate_concentration'
    var_list[2].name = 'salinity_corrected_nitrate'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'nitrate_concentration'
    var_list[2].name = 'salinity_corrected_nitrate'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'nitrate_concentration'
    var_list[2].name = 'salinity_corrected_nitrate'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'nitrate_concentration'
    var_list[2].name = 'salinity_corrected_nitrate'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'nitrate_concentration'
    var_list[2].name = 'salinity_corrected_nitrate'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/L'
    var_list[2].units = 'umol/L'
# FDCHP (direct covariance flux package) on the CE02SHSM buoy -- only the
# time coordinate is requested for this stream.
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
# FLORT (fluorometer / optical backscatter triplet) -- buoy-mounted (SBD17),
# recovered-instrument streams. Six variables: time, scattering coefficient,
# chlorophyll-a, CDOM, total volume scattering, and optical backscatter.
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'seawater_scattering_coefficient'
    var_list[2].name = 'fluorometric_chlorophyll_a'
    var_list[3].name = 'fluorometric_cdom'
    var_list[4].name = 'total_volume_scattering_coefficient'
    var_list[5].name = 'optical_backscatter'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'm-1'
    var_list[2].units = 'ug/L'
    var_list[3].units = 'ppb'
    var_list[4].units = 'm-1 sr-1'
    var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'seawater_scattering_coefficient'
    var_list[2].name = 'fluorometric_chlorophyll_a'
    var_list[3].name = 'fluorometric_cdom'
    var_list[4].name = 'total_volume_scattering_coefficient'
    var_list[5].name = 'optical_backscatter'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'm-1'
    var_list[2].units = 'ug/L'
    var_list[3].units = 'ppb'
    var_list[4].units = 'm-1 sr-1'
    var_list[5].units = 'm-1'
# FLORT on the CE09OSPM wire-following profiler, recovered-WFP stream. Same
# optical variables as the buoy branches, plus the profiler CTD pressure for
# depth registration.
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
    uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'seawater_scattering_coefficient'
    var_list[2].name = 'fluorometric_chlorophyll_a'
    var_list[3].name = 'fluorometric_cdom'
    var_list[4].name = 'total_volume_scattering_coefficient'
    var_list[5].name = 'optical_backscatter'
    var_list[6].name = 'int_ctd_pressure'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'm-1'
    var_list[2].units = 'ug/L'
    var_list[3].units = 'ppb'
    var_list[4].units = 'm-1 sr-1'
    var_list[5].units = 'm-1'
    var_list[6].units = 'dbar'
# DOSTA (dissolved oxygen) on the CE09OSPM profiler (DOFST-K variant),
# recovered-WFP stream: processed oxygen (umol/kg), the raw instrument
# frequency output (Hz), and CTD pressure for depth.
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
    uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dofst_k_oxygen_l2'
    var_list[2].name = 'dofst_k_oxygen'
    var_list[3].name = 'int_ctd_pressure'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'Hz'
    var_list[3].units = 'dbar'
# DOSTA (dissolved oxygen, CTDBP-hosted) -- recovered-instrument streams on
# NSIF (RID16) and MFN (MFD37) frames. Variables: processed oxygen
# (umol/kg), raw CTD-TC oxygen (umol/L), and host-CTD temperature (degC).
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[3].name = 'ctdbp_seawater_temperature'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
    var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[3].name = 'ctdbp_seawater_temperature'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
    var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[3].name = 'ctdbp_seawater_temperature'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
    var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[3].name = 'ctdbp_seawater_temperature'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
    var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[3].name = 'ctdbp_seawater_temperature'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
    var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[3].name = 'ctdbp_seawater_temperature'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
    var_list[3].units = 'degC'
# WAVSS wave statistics -- served here from the ADCPT-M instrument's log9
# recovered stream on the MFN frames: significant wave height, peak period,
# and peak direction.
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'significant_wave_height'
    var_list[2].name = 'peak_wave_period'
    var_list[3].name = 'peak_wave_direction'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'meters'
    var_list[2].units = 'seconds'
    var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
    uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
    var_list[0].name = 'time'
    var_list[1].name = 'significant_wave_height'
    var_list[2].name = 'peak_wave_period'
    var_list[3].name = 'peak_wave_direction'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'meters'
    var_list[2].units = 'seconds'
    var_list[3].units = 'degrees'
# Cabled benthic experiment package (BEP) branches: 'Streamed' method means
# real-time data over the cabled array. CTDBP-NO sample stream: temperature,
# salinity, density, pressure, conductivity.
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'seawater_temperature'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'ctdbp_no_seawater_pressure'
    var_list[5].name = 'ctdbp_no_seawater_conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'seawater_temperature'
    var_list[2].name = 'practical_salinity'
    var_list[3].name = 'density'
    var_list[4].name = 'ctdbp_no_seawater_pressure'
    var_list[5].name = 'ctdbp_no_seawater_conductivity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
    var_list[3].units = 'kg/m3'
    var_list[4].units = 'dbar'
    var_list[5].units = 'S/m'
# DOSTA over the cabled BEPs: oxygen is pulled from the same ctdbp_no_sample
# stream as the CTD branches above (the oxygen sensor is CTD-hosted), just
# with a different variable selection.
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
    var_list[0].name = 'time'
    var_list[1].name = 'dissolved_oxygen'
    var_list[2].name = 'ctd_tc_oxygen'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'umol/kg'
    var_list[2].units = 'umol/L'
# PHSEN over the cabled BEPs (streamed). Note the pH variable here is
# 'ph_seawater', not the 'phsen_abcdef_ph_seawater' name used by the
# uncabled recovered-instrument branches.
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
    var_list[0].name = 'time'
    var_list[1].name = 'phsen_thermistor_temperature'
    var_list[2].name = 'ph_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'unitless'
# PCO2W over the cabled BEPs (streamed SAMI data record).
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
    var_list[0].name = 'time'
    var_list[1].name = 'pco2w_thermistor_temperature'
    var_list[2].name = 'pco2_seawater'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'degC'
    var_list[2].units = 'uatm'
# ADCP over the cabled BEPs (streamed beam-velocity stream): per-bin depths,
# attitude (heading/pitch/roll, reported in deci-degrees), and the three
# earth-referenced velocity components.
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
    var_list[0].name = 'time'
    var_list[1].name = 'bin_depths'
    var_list[2].name = 'heading'
    var_list[3].name = 'pitch'
    var_list[4].name = 'roll'
    var_list[5].name = 'eastward_seawater_velocity'
    var_list[6].name = 'northward_seawater_velocity'
    var_list[7].name = 'upward_seawater_velocity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'meters'
    var_list[2].units = 'deci-degrees'
    var_list[3].units = 'deci-degrees'
    var_list[4].units = 'deci-degrees'
    var_list[5].units = 'm/s'
    var_list[6].units = 'm/s'
    var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
    var_list[0].name = 'time'
    var_list[1].name = 'bin_depths'
    var_list[2].name = 'heading'
    var_list[3].name = 'pitch'
    var_list[4].name = 'roll'
    var_list[5].name = 'eastward_seawater_velocity'
    var_list[6].name = 'northward_seawater_velocity'
    var_list[7].name = 'upward_seawater_velocity'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[5].data = np.array([])
    var_list[6].data = np.array([])
    var_list[7].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'meters'
    var_list[2].units = 'deci-degrees'
    var_list[3].units = 'deci-degrees'
    var_list[4].units = 'deci-degrees'
    var_list[5].units = 'm/s'
    var_list[6].units = 'm/s'
    var_list[7].units = 'm/s'
# VEL3D (point turbulence velocimeter) over the cabled BEPs (streamed):
# three turbulent velocity components plus pressure in millibar-scaled dbar
# (the '0.001dbar' unit string matches the stream's native scaling).
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
    var_list[0].name = 'time'
    var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
    var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
    var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
    var_list[4].name = 'seawater_pressure_mbar'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'm/s'
    var_list[2].units = 'm/s'
    var_list[3].units = 'm/s'
    var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
    var_list[0].name = 'time'
    var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
    var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
    var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
    var_list[4].name = 'seawater_pressure_mbar'
    var_list[0].data = np.array([])
    var_list[1].data = np.array([])
    var_list[2].data = np.array([])
    var_list[3].data = np.array([])
    var_list[4].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].units = 'm/s'
    var_list[2].units = 'm/s'
    var_list[3].units = 'm/s'
    var_list[4].units = '0.001dbar'
# OPTAA (spectrophotometer) over the cabled BEPs -- only the time coordinate
# is requested from the optaa_sample stream.
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
    uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
    uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
# --- CSPP (Coastal Surface-Piercing Profiler) datasets below ---
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient '
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient '
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = | np.array([]) | numpy.array |
import numpy as np
from multiprocessing import Pool
from functools import partial
from tqdm import trange
from sys import exit
from PreFRBLE.convenience import *
from PreFRBLE.parameter import *
from PreFRBLE.physics import *
from PreFRBLE.LikelihoodFunction import LikelihoodFunction
from PreFRBLE.Scenario import Scenario
############################################################################
############### MATHEMATICAL LIKELIHOOD STANDARD OPERATIONS ################
############################################################################
### !!! deprecated, remove
def Likelihood( data=np.arange(1,3), bins=10, range=None, density=True, log=False, weights=None, **kwargs ):
    """Build a LikelihoodFunction from a histogram of `data`.

    Wrapper around numpy.histogram that additionally supports a log10-scaled
    probability density function. Extra keyword arguments are forwarded to the
    LikelihoodFunction constructor.
    """
    if log:
        # Histogram log10(|data|), then map the bin edges back to linear scale.
        log_range = np.log10(range) if range is not None else None
        counts, edges = np.histogram(np.log10(np.abs(data)), bins=bins, range=log_range, weights=weights)
        edges = 10.**edges
        counts = counts.astype('float64')
        if density:
            # Normalize by hand: the linear-scale bins are not equally spaced.
            counts = counts / (np.sum(counts) * np.diff(edges))
    else:
        lin_range = (np.min(data), np.max(data)) if range is None else range
        counts, edges = np.histogram(data, bins=bins, range=lin_range, density=density, weights=weights)
    return LikelihoodFunction(P=counts, x=edges, **kwargs)

Histogram = Likelihood  ## old name, replace everywhere
### !!! deprecated, remove
def LikelihoodSmooth( P=[], x=[], dev=[], mode='MovingAverage' ):
    """Smooth the likelihood function P(x).

    Available modes:
      'MovingAverage' : moving average over 5 neighbouring bins
    Returns [P, x], extended by dev if a deviation array was given.
    """
    norm = LikelihoodNorm(P=P, x=x, dev=dev)
    if mode == 'MovingAverage':
        window = 5
        kernel = np.ones(window) / window
        P = np.convolve(P, kernel, mode='same')
        # The convolution does not conserve the normalization, so restore it.
        P *= norm / LikelihoodNorm(P=P, x=x, dev=dev)
    result = [P, x]
    if len(dev) > 0:
        result.append(dev)
    return result
### !!! deprecated, remove
def LikelihoodNorm( P=[], x=[], dev=[] ):
    """Return the integral norm sum(P * dx) of the binned likelihood function P(x).

    `dev` is accepted for interface compatibility but does not enter the norm.
    """
    bin_widths = np.diff(x)
    return np.multiply(P, bin_widths).sum()
### !!! deprecated, remove
def LikelihoodDeviation( P=[], x=[], N=1 ):
""" compute relative deviation (Poisson noise) of likelihood function of individual model obtained from sample of N events """
res = ( P*np.diff(x)*N )**-0.5
res[ | np.isinf(res) | numpy.isinf |
from .utils.coordinates_helper import view2pixel
from .neural_network.NeuralNetwork import NeuralNetwork
from .utils.sky_extractor import extract_sky
from .utils.fixes import fix_dead_pixels
from datetime import datetime, timedelta
from matplotlib import path
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
import numpy as np
from os import walk
from os.path import join, splitext
import cv2
from PIL import Image
IMAGES_EXT = '.jpg'
ORIGINAL_WIDTH = 5184
ORIGINAL_HEIGHT = 3456
CROP_SIZE = 2300
NETWORK_INPUT_SIZE = 512
NETWORK_OUTPUT_SIZE = 128
BATCH_SIZE = 1
def preprocess_images(images):
    """Prepare raw camera frames for the neural network.

    Each frame has its dead pixels repaired, the sky region extracted at the
    network input resolution, and its pixel values rescaled from [0, 255]
    to [0, 1].
    """
    prepared = []
    for frame in images:
        repaired = fix_dead_pixels(frame)
        sky = extract_sky(repaired, NETWORK_INPUT_SIZE)
        prepared.append(np.divide(sky, 255))
    return prepared
g_display_images = False
end = False
def turn_off_images(_):
    """Matplotlib button callback: disable per-image display for the rest of the run
    and close the current figure."""
    global g_display_images
    g_display_images = False
    plt.close()
def stop_processing(_):
    """Matplotlib button callback: raise the module-level stop flag and close the figure.

    The `end` flag is presumably polled by the processing loop to abort early
    — confirm against the caller.
    """
    global end
    end = True
    plt.close()
def next_image(_):
    """Matplotlib button callback: close the current figure to advance to the next image."""
    plt.close()
def estimate_cloudiness(image_paths, coordinates, display_images):
global g_display_images
global end
g_display_images = display_images
percentages = []
input_coordinates = []
output_coordinates = []
for x, y in coordinates:
input_coordinates.append((
round((x - (ORIGINAL_WIDTH - CROP_SIZE) / 2)
/ (CROP_SIZE / NETWORK_INPUT_SIZE)),
round((y - (ORIGINAL_HEIGHT - CROP_SIZE) / 2)
/ (CROP_SIZE / NETWORK_INPUT_SIZE))
))
output_coordinates.append((
round((x - (ORIGINAL_WIDTH - CROP_SIZE) / 2)
/ (CROP_SIZE / NETWORK_OUTPUT_SIZE)),
round((y - (ORIGINAL_HEIGHT - CROP_SIZE) / 2)
/ (CROP_SIZE / NETWORK_OUTPUT_SIZE))
))
output_polygon = path.Path(output_coordinates)
neural_network = NeuralNetwork()
for i in range(0, len(image_paths), BATCH_SIZE):
print('Processing {}/{}'.format(i + 1, len(image_paths)))
images = []
for j in range(BATCH_SIZE):
if i + j >= len(image_paths):
break
image = cv2.imread(image_paths[i + j])
images.append(image)
preprocessed_images = preprocess_images(images)
cloud_outputs = neural_network.run(preprocessed_images)
for i, cloud_output in enumerate(cloud_outputs[0]):
points_inside = 0
cloudiness = 0
for y in range(cloud_output.shape[0]):
for x in range(cloud_output.shape[1]):
if output_polygon.contains_point((x, y)):
cloudiness += cloud_output[y, x, 0]
points_inside += 1
if points_inside > 0:
percentages.append(int(round(100 * cloudiness / points_inside)))
else:
print('ERROR: Invalid coordinates or image')
percentages.append(-1)
if g_display_images:
plt.figure(figsize=(10, 5))
plt.figtext(0.43, 0.94, 'Cloudiness: ' + str(percentages[-1]) + '%', size='x-large')
subplot = plt.subplot(121)
plt.imshow(preprocessed_images[i][:, :, ::-1])
plt.axis('off')
plt.title('Night Sky', size='medium')
input_polygon_patch = patches.Polygon( | np.array(input_coordinates) | numpy.array |
# Copyright 2016-2018, Rigetti Computing
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
QuantumFlow Gate Decompositions
"""
from typing import Sequence, Tuple
import itertools
import numpy as np
from numpy import pi
from .qubits import asarray
from .config import TOLERANCE
from .gates import Gate
from .measures import gates_close
from .stdgates import RN, CANONICAL, TZ, TY
from .circuits import Circuit
__all__ = ['bloch_decomposition',
'zyz_decomposition',
'kronecker_decomposition',
'canonical_decomposition',
'canonical_coords']
def bloch_decomposition(gate: Gate) -> Circuit:
    """
    Convert a 1-qubit gate into a single RN gate: a rotation of angle theta
    about the axis (nx, ny, nz) of the Bloch sphere.

    Returns:
        A Circuit containing a single RN gate
    """
    if gate.qubit_nb != 1:
        raise ValueError('Expected 1-qubit gate')
    U = asarray(gate.asoperator())
    # Project onto SU(2) so the rotation parameters can be read off directly.
    U /= np.linalg.det(U) ** (1/2)
    nx, ny, nz = -U[0, 1].imag, -U[0, 1].real, -U[0, 0].imag
    N = np.sqrt(nx**2 + ny**2 + nz**2)
    if N == 0:
        # Identity gate: rotation angle is zero, so the axis is arbitrary.
        nx, ny, nz = 1, 1, 1
    else:
        nx, ny, nz = nx / N, ny / N, nz / N
    # sin(theta/2) = N and cos(theta/2) = Re U[0, 0].
    theta = 2 * np.arctan2(N, U[0, 0].real)
    # Return a Circuit (rather than just a gate) to keep the interface of
    # decomposition routines uniform.
    return Circuit([RN(theta, nx, ny, nz, *gate.qubits)])
# DOCME TESTME
def zyz_decomposition(gate: Gate) -> Circuit:
    """
    Return the Euler Z-Y-Z decomposition of a local 1-qubit gate as a
    Circuit of TZ, TY, TZ gates.
    """
    if gate.qubit_nb != 1:
        raise ValueError('Expected 1-qubit gate')
    q, = gate.qubits
    U = asarray(gate.asoperator())
    U /= np.linalg.det(U) ** (1/2)  # Project onto SU(2)
    # Middle rotation angle from the magnitudes of the first column,
    # using whichever entry is numerically better conditioned.
    if abs(U[0, 0]) > abs(U[1, 0]):
        theta1 = 2 * np.arccos(min(abs(U[0, 0]), 1))
    else:
        theta1 = 2 * np.arcsin(min(abs(U[1, 0]), 1))

    def _phase_angle(element, denom):
        # 2 * arg(element / denom), guarded against a vanishing denominator.
        if np.isclose(denom, 0.0):
            return 0.0
        ratio = element / denom
        return 2 * np.arctan2(np.imag(ratio), np.real(ratio))

    # theta0 + theta2 and theta0 - theta2 from the phases of U entries.
    sum_angle = _phase_angle(U[1, 1], np.cos(theta1 / 2))
    diff_angle = _phase_angle(U[1, 0], np.sin(theta1 / 2))
    theta0 = (sum_angle + diff_angle) / 2
    theta2 = (sum_angle - diff_angle) / 2
    circ = Circuit()
    circ += TZ(theta2 / np.pi, q)
    circ += TY(theta1 / np.pi, q)
    circ += TZ(theta0 / np.pi, q)
    return circ
def kronecker_decomposition(gate: Gate) -> Circuit:
"""
Decompose a 2-qubit unitary composed of two 1-qubit local gates.
Uses the "Nearest Kronecker Product" algorithm. Will give erratic
results if the gate is not the direct product of two 1-qubit gates.
"""
# An alternative approach would be to take partial traces, but
# this approach appears to be more robust.
if gate.qubit_nb != 2:
raise ValueError('Expected 2-qubit gate')
U = asarray(gate.asoperator())
rank = 2**gate.qubit_nb
U /= np.linalg.det(U) ** (1/rank)
R = np.stack([U[0:2, 0:2].reshape(4),
U[0:2, 2:4].reshape(4),
U[2:4, 0:2].reshape(4),
U[2:4, 2:4].reshape(4)])
u, s, vh = np.linalg.svd(R)
v = vh.transpose()
A = ( | np.sqrt(s[0]) | numpy.sqrt |
import copy
import gc
import numpy
from numpy.linalg import LinAlgError
import joblib
import pandas
import psutil
import pygmo
from scipy.optimize import minimize
from scipy.optimize import differential_evolution
import time
from typing import Dict, List, Tuple
import warnings
from .constants import Constants
from .constants import Messages
from .datatypes import Measurement
from .datatypes import Sensitivity
from .generalized_islands import ArchipelagoHelpers
from .generalized_islands import LossCalculator
from .generalized_islands import ParallelEstimationInfo
from .model_checking import ModelChecker
from .oed import CovOptimality
from .parameter import Parameter
from .parameter import ParameterMapper
from .parameter import ParameterManager
from .simulation import ExtendedSimulator
from .utils import Calculations
from .utils import OwnDict
from .utils import Helpers
EPS64 = Constants.eps_float64
PRETTY_METRICS = Constants.pretty_metrics
SINGLE_ID = Constants.single_id
class Caretaker():
"""
Manages (takes care of) all major methods related to simulation, estimation,
and evaluation of dynamic bioprocess models and its observation functions.
Exposes several convient methods of the individual classes in this module.
"""
    def __init__(self,
                 bioprocess_model_class, model_parameters, states:list=None, initial_values:dict=None, replicate_ids:list=None,
                 initial_switches:list=None, model_name:str=None, observation_functions_parameters:List[tuple]=None,
                 model_checking_assistance:bool=True,
                 ):
        """
        Arguments
        ---------
        bioprocess_model_class : Subclass of BioprocessModel
            This class implements the bioprocess model.
        model_parameters : list or dict
            The model parameters, as specified in `bioprocess_model_class`.

        Keyword arguments
        -----------------
        states : list
            The model states, as specified in `bioprocess_model_class`.
            Default is None, which enforces `initial_values` not to be None.
        initial_values : dict
            Initial values to the model, keys must match the states with a trailing '0'.
            Default is None, which enforces `states` not to be None.
        replicate_ids : list
            Unique ids of replicates, for which the full model applies.
            The parameters, as specified for the model and observation functions are considered as global ones,
            which may have different names and values for each replicate.
            Default is None, which implies a single replicate model.
        initial_switches : list
            A list of booleans, indicating the initial state of switches.
            Number of switches must correspond to the number of return events in method `state_events`,
            if this method is implemented by the inheriting class.
            Default is None, which enables auto-detection of initial switches, which all will be False.
        model_name : str
            A descriptive model name.
            Default is None.
        observation_functions_parameters : list of tuples
            Each tuple stores a subclass of ObservationFunction
            and a dictionary of its corresponding parametrization.
            Default is None, which implies that there are no ObservationFunctions.
        model_checking_assistance : bool
            Runs a few sanity and call checks on the implemented model.
        """
        # Store constructor arguments so `add_replicate` can re-initialize this object later.
        self.__bioprocess_model_class = bioprocess_model_class
        self.__model_parameters = model_parameters
        self.__states = states
        self.__initial_values = initial_values
        self.__replicate_ids = replicate_ids
        self.__initial_switches = initial_switches
        self.__model_name = model_name
        self.__observation_functions_parameters = observation_functions_parameters
        # Goes through the property setter, which validates uniqueness and
        # maps None to the implicit single-replicate id.
        self.replicate_ids = replicate_ids
        if model_name is None:
            self.model_name = bioprocess_model_class.__name__
        else:
            self.model_name = model_name
        # Create an ExtendedSimulator instance for each replicate id.
        # NOTE(review): the per-replicate simulator names use the raw `model_name`
        # argument, so model_name=None yields names like 'None_<replicate_id>'
        # — confirm this is intended.
        model_checker = ModelChecker()
        self.simulators = {}
        for _replicate_id in self.replicate_ids:
            if _replicate_id is None:
                _model_name = model_name
            else:
                _model_name = f'{model_name}_{_replicate_id}'
            _simulator = ExtendedSimulator(
                bioprocess_model_class,
                model_parameters,
                states,
                initial_values,
                initial_switches,
                _model_name,
                observation_functions_parameters,
                _replicate_id,
            )
            if model_checking_assistance:
                # Check a deep copy so the check cannot mutate the stored simulator.
                if not model_checker.check_model_consistency(copy.deepcopy(_simulator)):
                    warnings.warn(f'There might by some issues for {_model_name} with replicate_id {_replicate_id}')
            self.simulators[_replicate_id] = _simulator
        # Create a ParameterManager object; all replicates share the same global
        # parameter set, taken from the first replicate's simulator.
        self._parameter_manager = ParameterManager(
            self.replicate_ids,
            self.simulators[self.replicate_ids[0]].get_all_parameters(),
        )
        self.optimizer_kwargs = None
#%% Properties
    @property
    def replicate_ids(self) -> list:
        """The unique replicate ids managed by this Caretaker; [SINGLE_ID] in single-replicate mode."""
        return self._replicate_ids
@replicate_ids.setter
def replicate_ids(self, value):
if value is None:
self._replicate_ids = [SINGLE_ID]
else:
if not Helpers.has_unique_ids(value):
raise ValueError(Messages.non_unique_ids)
self._replicate_ids = value
    @property
    def parameter_mapping(self):
        """The current parameter mapping, as held by the internal ParameterManager."""
        return self._parameter_manager.parameter_mapping
    @property
    def optimizer_kwargs(self) -> dict:
        """Default keyword arguments for `estimate`'s optimizer; None means no defaults are applied."""
        return self._optimizer_kwargs
@optimizer_kwargs.setter
def optimizer_kwargs(self, value):
if value is not None and not isinstance(value, dict):
raise ValueError('Optimizer kwargs must be either `None` or a dictionary')
self._optimizer_kwargs = value
#%% Public methods
    def add_replicate(self, replicate_id:str, mappings:List[ParameterMapper]=None):
        """
        Adds another replicate to the multi model Caretaker object.

        Arguments
        ---------
        replicate_id : str
            The new replicate_id to be added.

        Keyword arguments
        -----------------
        mappings : list of ParameterMapper or tuple
            A list of parameter mappings that should be applied to the new replicate_id.
            Default is None, which implies that the local parameter names for the new replicate correspond to the global names.

        Raises
        ------
        AttributeError
            In case the Caretaker object was created without explicit `replicate_ids` argument.
        ValueError
            The new replicate_id is not unique including the existing ones.
        KeyError
            Any of the `mappings` aims not for the new replicate_id.
        """
        # Snapshot the current mappings and parameter values, since re-initialization
        # below rebuilds the parameter manager from scratch.
        _parameter_mappers = self._parameter_manager.get_parameter_mappers()
        _parameters = self._get_all_parameters()
        if len(self.replicate_ids) == 1 and self.replicate_ids[0] is None:
            raise AttributeError('Cannot add replicate_id to implicitly defined single replicate Caretaker object')
        _updated_replicate_ids = copy.deepcopy(self.replicate_ids)
        _updated_replicate_ids.append(replicate_id)
        if not Helpers.has_unique_ids(_updated_replicate_ids):
            raise ValueError(Messages.non_unique_ids)
        # Validate the given mappings before any state is modified.
        if mappings is not None:
            for _mapping in mappings:
                if _mapping.replicate_id != replicate_id:
                    raise KeyError('The given mapping does not aim for the new replicate')
        # Re-run the constructor with the stored constructor arguments plus the new id.
        # NOTE(review): `model_checking_assistance` is not forwarded here, so the
        # re-initialization always uses its default (True) — confirm intended.
        self.__init__(
            bioprocess_model_class=self.__bioprocess_model_class,
            model_parameters=self.__model_parameters,
            states=self.__states,
            initial_values=self.__initial_values,
            replicate_ids=_updated_replicate_ids,
            initial_switches=self.__initial_switches,
            model_name=self.__model_name,
            observation_functions_parameters=self.__observation_functions_parameters,
        )
        # Restore the previous parameter values and mappings, then apply the new ones.
        self.set_parameters(_parameters)
        self.apply_mappings(_parameter_mappers)
        if mappings is not None:
            self.apply_mappings(mappings)
    def simulate(self, t:numpy.ndarray, parameters:dict=None, verbosity:int=40, reset_afterwards:bool=False, suppress_stdout:bool=True) -> list:
        """
        Runs a forward simulation for the fully specified model and its observation functions (if specified).

        Arguments
        ---------
        t : numpy.ndarray or float
            The time points for integration. In case a single time point is provided,
            the solver will treat this as final integration time and chooses the intermediate steps on its own.

        Keyword arguments
        -----------------
        parameters : dict
            In case a simulation for specific parameter values is wanted.
            Default is None.
        verbosity : int
            Prints solver statistics (quiet = 50, whisper = 40, normal = 30, loud = 20, scream = 10).
            Default is 40 (whisper).
        reset_afterwards : bool
            Forwarded to each replicate's simulator to reset it after the simulation.
            Default is False.
        suppress_stdout : bool
            No printouts of integrator warnings, which are directed to stdout by the assimulo package.
            Set to False for model debugging purposes.
            Default is True.

        Returns
        -------
        simulations : list
            The collection of simulations results as list of ModelState or Observation objects.
        """
        # Temporarily override the parameters, restoring the originals afterwards.
        if parameters is not None:
            _original_parameters = self._get_all_parameters()
            self.set_parameters(parameters)
        simulations = []
        for _id in self.simulators.keys():
            _simulator = self.simulators[_id]
            simulations.extend(_simulator.simulate(t=t, verbosity=verbosity, reset_afterwards=reset_afterwards, suppress_stdout=suppress_stdout))
        if parameters is not None:
            self.set_parameters(_original_parameters)
        return simulations
    def estimate(self,
                 unknowns:dict, measurements:List[Measurement], bounds:List[Tuple]=None, metric:str='negLL', use_global_optimizer:bool=None,
                 report_level:int=0, reset_afterwards:bool=False, handle_CVodeError:bool=True, optimizer_kwargs:dict=None,
                 ) -> Tuple[dict, dict]:
        """
        Estimates values for requested unknowns according to a specific metric, given some measurements.

        Arguments
        ---------
        unknowns : dict or list
            The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
            Providing a list of valid unknowns causes the use of scipy's differential evolution global optimizer.
            A dictionary with parameter:guess as key-value pairs is needed to use the local but faster minimizer.
        measurements : List[Measurement]
            The data from which the parameter estimation is performed.
            Can provide a Measurement object for any model state or observation.

        Keyword arguments
        -----------------
        bounds : list of tuples
            Bounds for each unknown to be estimated.
            Must be provided for use with differential evolution optimizer.
            Default is None.
        metric : str
            The metric according to which the loss to be minimized is calculated.
            Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
            Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
        use_global_optimizer : bool
            Enforce the use of differential evolution optimizer.
            Default is None, which makes this decision based on the type of `unknowns` and `bounds`.
        report_level : int
            Enables informative output about the estimation process.
            Default is 0, which is no output.
            1 = prints estimated parameters and runtime of the estimation job.
            2 = prints additionally the `OptimizeResult` result object, as returned by the optimizer
            3 = prints additionally handled CVodeErrors, which arise from toxic parameters.
                This has only effect in case `handle_CVodeError` is True
            4 = prints additionally the progress of the optimizer.
        reset_afterwards : bool
            To reset the Caretaker object after the estimation has finished.
            Default is False.
            NOTE(review): this flag is not acted upon anywhere in this method body — confirm.
        handle_CVodeError : bool
            Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
            Default is True.
        optimizer_kwargs : dict
            Keyword arguments forwarded to the scipy optimizer. Overrides the Caretaker
            property `optimizer_kwargs` (with a UserWarning if both are set).
            Default is None.

        Returns
        -------
        estimations : dict
            Key-value pairs of the unknowns and corresponding estimated values.
        estimation_info : dict
            Additional information about the estimation job.

        Raises
        ------
        KeyError
            Non-unique unknowns are provided.
        ValueError
            Invalid unknowns shall be estimated.
        ValueError
            No bounds are provided for use of differential evolution optimizer.
        TypeError
            A list containing not only Measurement objects is provided.

        Warns
        -----
        UserWarning
            The `optimizer_kwargs` argument is not None while the corresponding property is set.
        """
        for _item in measurements:
            if not isinstance(_item, Measurement):
                raise TypeError('Must provide a list of Measurement objects')
        _start = time.time()
        if not Helpers.has_unique_ids(unknowns):
            raise KeyError(Messages.bad_unknowns)
        # Reject unknowns that are not known to the model.
        _valid_unknowns = self._get_valid_parameter_names()
        for _unknown in unknowns:
            if _unknown not in _valid_unknowns:
                raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
        if use_global_optimizer == False and not isinstance(unknowns, dict):
            raise ValueError('Must provide initial guesses to use the local minimizer')
        # Sort unknowns and corresponding bounds alphabetically and case-insensitive.
        # Decide also whether to use the local or global minimizer:
        # a plain list of unknowns implies the global optimizer.
        _unknowns_names_sorted = sorted(unknowns, key=str.lower)
        if isinstance(unknowns, dict):
            _unknowns = {_unknown_name : unknowns[_unknown_name] for _unknown_name in _unknowns_names_sorted}
        elif isinstance(unknowns, list):
            _unknowns = {_unknown_name : None for _unknown_name in _unknowns_names_sorted}
            if use_global_optimizer is None:
                use_global_optimizer = True
        if use_global_optimizer and bounds is None:
            raise ValueError(Messages.missing_bounds)
        if bounds is not None:
            # Reorder bounds to match the sorted unknowns; the except branch covers dict input.
            try:
                _bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
            except AttributeError:
                _bounds = [bounds[list(unknowns.keys()).index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
            # To protect for integration error when a bound is integer 0.
            _bounds = Helpers.bounds_to_floats(_bounds)
        else:
            _bounds = None
        # Collect the optimizer kwargs, starting from the Caretaker property.
        _optimizer_kwargs = {}
        if self.optimizer_kwargs is None:
            _warning_flag = False
        else:
            _optimizer_kwargs.update(self.optimizer_kwargs)
            _warning_flag = True
        # The keyword argument `optimizer_kwargs` has higher priority than the Caretaker property.
        if optimizer_kwargs is not None:
            _optimizer_kwargs = optimizer_kwargs
            if _warning_flag:
                warnings.warn('Using the `optimizer_kwargs` keyword argument overrides the Caretaker property `optimizer_kwargs`.', UserWarning)
        if report_level >= 3:
            verbosity_CVodeError = True
        else:
            verbosity_CVodeError = False
        if report_level >= 4 and 'disp' not in _optimizer_kwargs.keys():
            _optimizer_kwargs['disp'] = True
        if use_global_optimizer:
            minimizer_scope = 'differential evolution optimizer'
            # Default population size scales with the number of unknowns.
            if 'popsize' not in _optimizer_kwargs.keys():
                popsize = 5*len(_unknowns)
                _optimizer_kwargs['popsize'] = popsize
            opt = differential_evolution(self._loss_fun_scipy,
                                         bounds=_bounds,
                                         args=(_unknowns,
                                               metric,
                                               measurements,
                                               handle_CVodeError,
                                               verbosity_CVodeError,
                                               ),
                                         **_optimizer_kwargs,
                                         )
        else:
            minimizer_scope = 'local minimizer'
            # scipy.optimize.minimize expects `disp` inside an `options` dict.
            if 'disp' in _optimizer_kwargs.keys():
                options = {'disp' : _optimizer_kwargs['disp']}
                del _optimizer_kwargs['disp']
                _optimizer_kwargs['options'] = options
            opt = minimize(self._loss_fun_scipy,
                           list(_unknowns.values()),
                           args=(_unknowns,
                                 metric,
                                 measurements,
                                 handle_CVodeError,
                                 verbosity_CVodeError,
                                 ),
                           bounds=_bounds,
                           **_optimizer_kwargs,
                           )
        # Prepare the returns.
        estimations = {_unknown : value for _unknown, value in zip(_unknowns, opt.x)}
        estimation_info = {}
        estimation_info['opt_info'] = opt
        if metric in list(PRETTY_METRICS.keys()):
            estimation_info['metric'] = PRETTY_METRICS[metric]
        else:
            estimation_info['metric'] = metric
        estimation_info['loss'] = opt.fun
        _end = time.time()
        estimation_info['runtime_min'] = (_end - _start)/60
        if report_level >= 1:
            print(f'\n----------Results from {minimizer_scope}')
            print('\nEstimated parameters:')
            for estimation in estimations.keys():
                print(f'{estimation}: {estimations[estimation]}')
            print(f'\nRuntime was {estimation_info["runtime_min"]:.2f} min')
        if report_level >= 2:
            print('\n----------')
            print(opt)
        return estimations, estimation_info
def estimate_parallel(self,
unknowns:list, measurements:List[Measurement], bounds:List[Tuple],
metric:str='negLL', report_level:int=0,
optimizers:List[str]='de1220', optimizers_kwargs:List[dict]={}, log_each_nth_gen:int=None,
rel_pop_size:float=10.0, evolutions:int=5, archipelago_kwargs:dict={},
atol_islands:float=None, rtol_islands:float=1e-6,
max_runtime_min:float=None,
max_evotime_min:float=None,
max_memory_share:float=0.95,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
) -> Tuple[dict, ParallelEstimationInfo]:
"""
Estimates values for requested unknowns according to a specific metric, given some measurements,
using the generalized island model for parallelization that allows for global optimization.
This is provided by the pygmo package, which runs parallel evolutions of populations,
with migration of improved variants between the populations occuring.
For further info and use of pygmo, see https://github.com/esa/pygmo2, doi:10.5281/zenodo.3603747.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The data from which the parameter estimation is performed.
Can provide a Measurement object for any model state or observation.
bounds : list of tuples
Bounds for for each unknown to be estimated, in the following form [(lower, upper), ...]
Keyword arguments
----------------
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement object are accordingly specified.
report_level : int
Enables informative output about the estimation process. Information will be printed after each evolution.
Default is 0, which is no output.
1 = Prints the best loss, as well as information about archipelago creation and evolution.
For each completed evolution, a dot is printed.
2 = Prints additionally the best loss after each evolution
3 = Prints additionally average loss among all islands, and the runtime of each evolution.
4 = Prints additionally the parameter values for the best loss, and the average parameter values
among the champions of all islands in the `archipelago` after the evolutions.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
see `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all explicitly
or implicitly (default None of `n_islands`) defined nunmber islands.
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizer can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers
Default is `de1220`, which makes each island to use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case more >1 optimizers are used, the 1-item list of optimizer_kwargs will be applied to all of the optimizers.
Default is `[{}]`, i.e. no additional optimizer kwargs.
log_each_nth_gen : int
Specifies at which each n-th generation the algorithm stores logs.
Can be later extracted from the returned `archipelago` instance.
Note that only the log from the last evolution is stored in the archipelago.
Default is None, which disables logging.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknown to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends of the topology of the archipelago, as well as the defined migration polices,
which are parts of `archipelago_kwargs`.
Default is 5, which triggers five rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
max_runtime_min : float
The maximun time in min the estimation process may take. The current runtimee is evaluated after each completion of an evolution.
Default is None, which implies there is no maximum runtime for the estimation process.
max_evotime_min : float
The maximum cumulative pure evolution time the estimation process is allowed to take.
In contrast to the `max_runtime_min` stopping criterion, only the evolution runtime is considered,
without runtime needed for checking stopping criteria, reporting prints outs between each evolution, etc.
Default is None.
max_memory_share : float
Defines the allowed memory share in usage, for which no evolutions are run anymore.
Default is 0.95, meaning that repeat are only run if used memory share is less than 95 %.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
loss_calculator : LossCalculator
By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
Default is LossCalculator, which implements no additional constraints
Returns
-------
best_estimates : dict
The estimated parameters, according to the best champion among all populations of the archipelago, aftet the last evolution.
estimation_result : ParallelEstimationInfo
Stores the archipelago and the history of the previous evolutions.
Needed to continue an estimation process.
Raises
------
KeyError
Non-unique unknowns are provided.
ValueError
Invalid unknowns shall be estimated.
"""
_start = time.time()
if len(unknowns) != len(bounds):
raise ValueError('Number of unkowns does not match number of pairs of upper and lower bounds.')
if not isinstance(optimizers, list) and isinstance(optimizers, str):
optimizers = [optimizers]
if not isinstance(optimizers_kwargs, list) and isinstance(optimizers_kwargs, dict):
optimizers_kwargs = [optimizers_kwargs]
if not Helpers.has_unique_ids(unknowns):
raise KeyError(Messages.bad_unknowns)
# test if parameters are estimated that are not known to the model
_valid_unknowns = self._get_valid_parameter_names()
for _unknown in unknowns:
if _unknown not in _valid_unknowns:
raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
# sort unknowns and corresponding bounds
_unknowns_names_sorted = sorted(unknowns, key=str.lower)
_unknowns = [_unknown_name for _unknown_name in _unknowns_names_sorted]
_bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
if report_level >= 5:
_verbosity_CVodeError = True
else:
_verbosity_CVodeError = False
# get the problem
pg_problem = pygmo.problem(
loss_calculator(
unknowns=_unknowns,
bounds=_bounds,
metric=metric,
measurements=measurements,
caretaker_loss_fun=self.loss_function,
handle_CVodeError=handle_CVodeError,
verbosity_CVodeError=_verbosity_CVodeError,
),
)
# get the archipelago
archipelago = ArchipelagoHelpers.create_archipelago(
_unknowns,
optimizers,
optimizers_kwargs,
pg_problem,
rel_pop_size,
archipelago_kwargs,
log_each_nth_gen,
report_level,
)
archipelago.problem = loss_calculator
estimation_result = ParallelEstimationInfo(archipelago=archipelago)
return self.estimate_parallel_continued(
estimation_result=estimation_result,
evolutions=evolutions,
report_level=report_level,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
max_runtime_min=max_runtime_min,
max_evotime_min=max_evotime_min,
max_memory_share=max_memory_share,
start_time=_start,
)
def estimate_parallel_continued(self,
estimation_result:ParallelEstimationInfo, evolutions:int=1, report_level:int=0,
atol_islands:float=None, rtol_islands:float=1e-6,
max_runtime_min:float=None,
max_evotime_min:float=None,
max_memory_share:float=0.95,
start_time:float=None,
) -> Tuple[dict, ParallelEstimationInfo]:
"""
Continues a parallel parameter estimation job by running more evolutions on a corresponding archipelago object.
Arguments
---------
estimation_result : ParallelEstimationInfo
Stores the archipelago and the history of the previous evolutions as returned by method 'estimate_parallel'.
Needed to continue an estimation process.
Keyword arguments
-----------------
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after an evolution.
report_level : int
Enables informative output about the estimation process. Information will be printed after each evolution.
Default is 0, which is no output.
1 = Prints the best loss, as well as information about archipelago creation and evolution.
For each completed evolution, a dot is printed.
2 = Prints additionally the best loss after each evolution
3 = Prints additionally average loss among all islands, and the runtime of each evolution.
4 = Prints additionally the parameter values for the best loss, and the average parameter values
among the champions of all islands in the `archipelago` after the evolutions.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
max_runtime_min : float
The maximum runtime in min the estimation process is allowed to take.
The current runtime is evaluated after each completion of an evolution.
Default is None, which implies that there is no time limit for the estimation process.
max_evotime_min : float
The maximum cumulative pure evolution time the estimation process is allowed to take.
In contrast to the `max_runtime_min` stopping criterion, only the evolution runtime is considered,
without runtime needed for checking stopping criteria, reporting prints outs between each evolution, etc.
Default is None.
max_memory_share : float
Defines the allowed memory share in usage, for which no evolutions are run anymore.
Default is 0.95, meaning that repeat are only run if used memory share is less than 95 %.
start_time : float
In case a total runtime, started from another method, shall be reported.
Default is None, which measured the total run time only within this method.
Returns
-------
best_estimates : dict
The estimated parameters, according to the best champion among all populations of the archipelago, aftet the last evolution.
estimation_result : ParallelEstimationInfo
Stores the archipelago and the history of the previous evolutions.
Needed to continue an estimation process.
Raises
------
ValueError
Not all island of the archipelago have the same unknowns.
"""
if start_time is None:
start_time = time.time()
archipelago = estimation_result.archipelago
evolutions_trail = estimation_result.evolutions_trail.copy()
if report_level >= 1:
if not evolutions_trail['evo_time_min']:
_filler = ''
else:
_filler = 'additional '
print(f'Running {_filler}{evolutions} evolutions for all {len(archipelago)} islands of the archipelago...\n')
_current_evotime_min = 0
for _evolution in range(1, evolutions+1):
_evo_start = time.time()
archipelago.evolve()
archipelago.wait_check()
_evo_end = time.time()
_evo_time_min = (_evo_end - _evo_start)/60
_current_evotime_min += _evo_time_min
best_estimates, best_loss, estimates_info = ArchipelagoHelpers.extract_archipelago_results(archipelago)
evolutions_trail['evo_time_min'].append(_evo_time_min)
evolutions_trail['best_losses'].append(best_loss)
evolutions_trail['best_estimates'].append(best_estimates)
evolutions_trail['estimates_info'].append(estimates_info)
if report_level == 1:
if _evolution % 120 == 0:
end = '\n'
else:
end = ''
print('.', end=end)
elif report_level >= 2:
ArchipelagoHelpers.report_evolution_result(evolutions_trail, report_level)
# evaluate stopping criteria after each evolution
_current_runtime_min = (time.time() - start_time)/60
stopping_criteria = ArchipelagoHelpers.check_evolution_stop(
current_losses=estimates_info['losses'],
atol_islands=atol_islands,
rtol_islands=rtol_islands,
current_runtime_min=_current_runtime_min,
max_runtime_min=max_runtime_min,
current_evotime_min=_current_evotime_min,
max_evotime_min=max_evotime_min,
max_memory_share=max_memory_share,
)
if any(stopping_criteria.values()):
if report_level >= 1:
print(f'\nReached a stopping criterion after evolution {len(evolutions_trail["evo_time_min"])}:')
for _st in stopping_criteria:
print(f'{_st}: {stopping_criteria[_st]}')
early_stop = True
break
else:
early_stop = False
if report_level >= 1:
if not early_stop:
print(f'\nCompleted {_evolution} {_filler}evolution runs.')
print('\nEstimated parameters:')
for p in best_estimates:
print(f'{p}: {best_estimates[p]}')
print('')
ArchipelagoHelpers.report_evolution_result(evolutions_trail, report_level=3)
_runtime_min = (time.time() - start_time)/60
evolutions_trail['cum_runtime_min'].append(_runtime_min)
if report_level >= 1:
if _runtime_min/60 > 1:
print(f'\nTotal runtime was {_runtime_min/60:.2f} h\n')
else:
print(f'\nTotal runtime was {_runtime_min:.2f} min\n')
estimation_result = ParallelEstimationInfo(archipelago=archipelago, evolutions_trail=evolutions_trail)
return best_estimates, estimation_result
def estimate_parallel_MC_sampling(self,
unknowns:list,
measurements:List[Measurement],
bounds:List[Tuple],
mc_samples:int=25,
reuse_errors_as_weights:bool=True,
metric:str='negLL',
report_level:int=0,
optimizers:List[str]='de1220',
optimizers_kwargs:List[dict]={},
rel_pop_size:float=10.0,
evolutions:int=25,
archipelago_kwargs:dict={},
atol_islands:float=None,
rtol_islands:float=1e-6,
n_islands:int=4,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
jobs_to_save:int=None,
max_memory_share:float=0.95,
) -> pandas.DataFrame:
"""
Performs Monte-Carlo sampling from measurements to create new measurements, according to the statitical distribution of the respective Measurement objects.
For each newly created measurement, the requested unknowns (parameters) are estimated, resulting in an empirical distribution of parameter values.
these empirical distributions for the parameters can be assessed for uncertainties and correlations.
For each MC sample, a parallel estimation procedure is carried out, for details see methods `estimate_parallel` and `estimate_parallel_continued`.
Depending on the available number of CPUs on your machine, these estimation procedure are run in parallel.
The selection of suitable hyperparameters, e.g. which optimizers, etc., use method `estimate_parallel` and refer to corresponding Jupyter notebooks.
NOTE: To increase the number of MC samples to an arbiratry high number, set `repeats_to_save` argument.
Afterwards, the results saved to disk can be read and merged.
NOTE: This method puts considerable computational load on your machine.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The measurements from which the parameters are to be estimated.
bounds : List[tuple]
List of tuples (lower, upper), one tuple for each parameter. Must be provided when using the global optimizer.
Default is None.
Keyword arguments
-----------------
mc_samples : int
The number of MC samples that shall be drawn from the measurement data.
Default is 25.
reuse_errors_as_weights : bool
Uses the measurement errors as weights for each set of measurement samples drawn.
Default is True.
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = Prints a dot for each processing batch, the total runtime and the ratio of samples that reached convergence.
2 = Prints likewise for 1, but with more information on each batch.
3 = Prints additionally the runtime of each batch, as well as as summary for the obtained parameter distributions.
4 = Prints additionally the reason for a MS sample to finish (i.e.,
whether convergence or the maximum number of evolutions was reached).
5 = Prints additionally information on the creation of the archipelagos for each batch
6 = Prints additionally the current evolution for each MC samples, and report any handled integration error.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
see `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all explicitly
or implicitly (default None of `n_islands`) defined nunmber islands.
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizer can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers
Default is `de1220`, which makes each island to use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case more >1 optimizers are used, the 1-item list of optimizer_kwargs will be applied to all of the optimizers.
Default is `{}`, i.e. no additional optimizer kwargs.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknown to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends of the topology of the archipelago, as well as the defined migration polices,
which are parts of `archipelago_kwargs`.
Default is 5, which triggers five rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
n_islands : int
Specifies the number of parallel estimations per MC samples for all archipelagos in an estimation batch.
In case a list of optimizers is provided, the number of islands is implicitly defined by its length.
Must use values > 1.
Default is 4.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
loss_calculator : LossCalculator
By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
Default is LossCalculator, which implements no additional constraints
jobs_to_save : int
Set to repeatedly run the specifified number of MC samples and to save the results from each repeat to file.
Default is None, which causes no result storage ti file.
max_memory_share : float
Defines the allowed memory share in usage, for which no repeats are run anymore. Has only effect if `jobs_to_save` is not None.
Default is 0.95, meaning that repeat are only run if used memory share is less than 95 %.
Returns
-------
estimates : pandas.DataFrame
The values from repeated estimation for the requested unknowns.
Only converged estimations are included.
Raises
------
AttributeError
Measurements have no errors.
ValueError
Degree of archipelago parallelization is < 2.
TypeError
A list containing not only Measurement objects is provided.
KeyError:
Non-unique unknowns detected.
ValueError:
Invalid parameters shall be estimated.
"""
if jobs_to_save is None:
_estimate = self._estimate_parallel_MC_sampling(
unknowns=unknowns,
measurements=measurements,
bounds=bounds,
mc_samples=mc_samples,
reuse_errors_as_weights=reuse_errors_as_weights,
metric=metric,
report_level=report_level,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
rel_pop_size=rel_pop_size,
evolutions=evolutions,
archipelago_kwargs=archipelago_kwargs,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
n_islands=n_islands,
handle_CVodeError=handle_CVodeError,
loss_calculator=loss_calculator,
)
return pandas.DataFrame.from_dict(_estimate)
_estimate_batches = []
session_id = int(time.monotonic())
for i in range(1, jobs_to_save+1):
curr_memory_share = psutil.virtual_memory().percent/100
if curr_memory_share > max_memory_share:
print(f'Cannot run MC estimation job due to low memory: {(1-curr_memory_share)*100:.2f} % free memory left')
else:
_estimate_batch = self._estimate_parallel_MC_sampling(
unknowns=unknowns,
measurements=measurements,
bounds=bounds,
mc_samples=mc_samples,
reuse_errors_as_weights=reuse_errors_as_weights,
metric=metric,
report_level=report_level,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
rel_pop_size=rel_pop_size,
evolutions=evolutions,
archipelago_kwargs=archipelago_kwargs,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
n_islands=n_islands,
handle_CVodeError=handle_CVodeError,
loss_calculator=loss_calculator,
)
_filename = f'{self.model_name}_MC-sample-estimates_session-id-{session_id}_job-{i}.xlsx'
_df = pandas.DataFrame.from_dict(_estimate_batch)
_estimate_batches.append(_df)
_df.to_excel(_filename)
if report_level > 0:
print(f'Current memory usage is {psutil.virtual_memory().percent:.2f} %.\nSaved results of job #{i} to file: {_filename}\n')
return pandas.concat(_estimate_batches, ignore_index=True)
    def _estimate_parallel_MC_sampling(self,
                                       unknowns:list,
                                       measurements:List[Measurement],
                                       bounds:List[Tuple],
                                       mc_samples:int=25,
                                       reuse_errors_as_weights:bool=True,
                                       metric:str='negLL',
                                       report_level:int=0,
                                       optimizers:List[str]='de1220',
                                       optimizers_kwargs:List[dict]={},
                                       rel_pop_size:float=10.0,
                                       evolutions:int=25,
                                       archipelago_kwargs:dict={},
                                       atol_islands:float=None,
                                       rtol_islands:float=1e-6,
                                       n_islands:int=4,
                                       handle_CVodeError:bool=True,
                                       loss_calculator:LossCalculator=LossCalculator,
                                       ) -> dict:
        """
        Worker behind `estimate_parallel_MC_sampling`.
        Performs Monte-Carlo sampling from measurements to create new measurements, according to the statistical distribution of the respective Measurement objects.
        For each newly created measurement, the requested unknowns (parameters) are estimated, resulting in an empirical distribution of parameter values.
        These empirical distributions for the parameters can be assessed for uncertainties and correlations.
        For each MC sample, a parallel estimation procedure is carried out, for details see methods `estimate_parallel` and `estimate_parallel_continued`.
        Depending on the available number of CPUs on your machine, these estimation procedures are run in parallel.
        For the selection of suitable hyperparameters, e.g. which optimizers, etc., use method `estimate_parallel` and refer to corresponding Jupyter notebooks.

        NOTE: To increase the number of MC samples to an arbitrarily high number, run this method several times and store intermediate results.
              Afterwards, these can be merged.
        NOTE: This method puts considerable computational load on your machine.

        Arguments
        ---------
        unknowns : dict or list
            The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
        measurements : List[Measurement]
            The measurements from which the parameters are to be estimated.
        bounds : List[tuple]
            List of tuples (lower, upper), one tuple for each parameter. Must be provided when using the global optimizer.
            Default is None.

        Keyword arguments
        -----------------
        mc_samples : int
            The number of MC samples that shall be drawn from the measurement data.
            Default is 25.
        reuse_errors_as_weights : bool
            Uses the measurement errors as weights for each set of measurement samples drawn.
            Default is True.
        metric : str
            The metric according to which the loss to be minimized is calculated.
            Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
            Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
        report_level : int
            Enables informative output about the estimation process.
            Default is 0, which is no output.
            1 = Prints a dot for each processing batch, the total runtime and the ratio of samples that reached convergence.
            2 = Prints likewise for 1, but with more information on each batch.
            3 = Prints additionally the runtime of each batch, as well as a summary for the obtained parameter distributions.
            4 = Prints additionally the reason for an MC sample to finish (i.e.,
                whether convergence or the maximum number of evolutions was reached).
            5 = Prints additionally information on the creation of the archipelagos for each batch.
            6 = Prints additionally the current evolution for each MC sample, and reports any handled integration error.
        optimizers : List[str] or str
            A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
            see `PygmoOptimizers` class of this module.
            In case a list with one item is used, this optimizer is used for all explicitly
            or implicitly (default None of `n_islands`) defined number of islands.
            In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
            The currently supported list of optimizers can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers
            Default is `de1220`, which makes each island use this algorithm.
        optimizers_kwargs : List[dict] or dict
            A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
            In case >1 optimizers are used, a 1-item list of optimizer_kwargs will be applied to all of the optimizers.
            Default is `{}`, i.e. no additional optimizer kwargs.
        rel_pop_size : float
            Determines the population size on each island, relative to the number of unknowns to be estimated,
            i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
            Default is 10, which creates population sizes 10 times the number of unknowns.
        evolutions : int
            Defines how often the populations on the islands are evolved.
            Migrations between the populations of the islands occur after each finished evolution.
            Migration depends on the topology of the archipelago, as well as the defined migration policies,
            which are parts of `archipelago_kwargs`.
            Default is 25.
        archipelago_kwargs : dict
            The keyword arguments for instantiation of the archipelago.
            In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used.
            Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
        atol_islands : float
            Defines a stopping criterion that is checked after each evolution.
            If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
            Default is None, which implies no effect for this argument.
        rtol_islands : float
            Defines a stopping criterion that is checked after each evolution.
            If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
            Default is 1e-6.
        n_islands : int
            Specifies the number of parallel estimations per MC sample for all archipelagos in an estimation batch.
            In case a list of optimizers is provided, the number of islands is implicitly defined by its length.
            Must use values > 1.
            Default is 4.
        handle_CVodeError : bool
            Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
            Default is True.
        loss_calculator : LossCalculator
            By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
            Default is LossCalculator, which implements no additional constraints.

        Returns
        -------
        estimates : dict
            The values from repeated estimation for the requested unknowns.
            Only converged estimations are included.

        Raises
        ------
        AttributeError
            Measurements have no errors.
        ValueError
            Degree of archipelago parallelization is < 2.
        TypeError
            A list containing not only Measurement objects is provided.
        KeyError
            Non-unique unknowns detected.
        ValueError
            Invalid parameters shall be estimated.
        """
        _start = time.time()
        # Input validation
        if len(unknowns) != len(bounds):
            raise ValueError('Number of unkowns does not match number of pairs of upper and lower bounds.')
        for _item in measurements:
            if not isinstance(_item, Measurement):
                raise TypeError('Must provide a list of Measurement objects')
        if not Helpers.all_measurements_have_errors(measurements):
            raise AttributeError('Measurements must have errors')
        if not Helpers.has_unique_ids(unknowns):
            raise KeyError(Messages.bad_unknowns)
        _valid_unknowns = self._get_valid_parameter_names()
        for _unknown in unknowns:
            if _unknown not in _valid_unknowns:
                raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
        # Sort unknowns (case-insensitively) and keep their corresponding bounds aligned
        _unknowns_names_sorted = sorted(unknowns, key=str.lower)
        _unknowns = [_unknown_name for _unknown_name in _unknowns_names_sorted]
        _bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
        if report_level >= 6:
            _verbosity_CVodeError = True
        else:
            _verbosity_CVodeError = False
        # Check whether to use the same optimizer for all islands or a list of optimizers;
        # a list of optimizers implicitly overrides n_islands with its length
        if isinstance(optimizers, str):
            optimizers = [optimizers]*n_islands
        elif isinstance(optimizers, list):
            n_islands = len(optimizers)
        if isinstance(optimizers_kwargs, dict):
            optimizers_kwargs = [optimizers_kwargs]*n_islands
        if n_islands < 2:
            raise ValueError('Must use at least 2 islands per archipelago, either by specifying `n_islands` or using a list with more than 1 optimizers for kwargs `optimizers`.')
        # None tolerances are neutralized to 0.0 so the convergence criterion below is well-defined
        if atol_islands is None:
            atol_islands = 0.0
        if rtol_islands is None:
            rtol_islands = 0.0
        # Calculate the number of archipelagos that can be run in parallel,
        # given that each archipelago occupies n_islands CPUs
        n_archis = max([int(numpy.floor(joblib.cpu_count()/n_islands)), 1])
        # Collects all finished estimation jobs
        _mc_estimates = []
        # Run parallel estimation jobs batch-wise, n_archis MC samples per batch
        for i in range(1, mc_samples+1, n_archis):
            _batch_no = int((i-1+n_archis)/n_archis)
            if report_level == 1:
                # Progress dots, with a line break after every 120 batches
                if _batch_no % 120 == 0:
                    end = '\n'
                else:
                    end = ''
                print('.', end=end)
            elif report_level >= 2:
                if report_level >= 3:
                    _insert = '\n'
                else:
                    _insert = ''
                _current_samples = [_sample for _sample in range(i, _batch_no*n_archis+1) if _sample <= mc_samples]
                _first = _current_samples[0]
                _last = f' to {_current_samples[-1]}' if _current_samples[-1] != _first else ''
                print(f'{_insert}------- Starting batch #{_batch_no} for MC sample {_first}{_last}.')
            # Initialize the current batch
            _batch_start = time.time()
            mc_count = i
            active_archis = []
            # Create a batch of achipelagos, one per MC sample
            for j in range(n_archis):
                if mc_count+j > mc_samples:
                    break
                # Create the problem for a MC sample, drawing a fresh set of
                # perturbed measurements for each archipelago
                _pg_problem = pygmo.problem(
                    loss_calculator(
                        unknowns=_unknowns,
                        bounds=_bounds,
                        metric=metric,
                        measurements=self._draw_measurement_samples(measurements, reuse_errors_as_weights),
                        caretaker_loss_fun=self.loss_function,
                        handle_CVodeError=handle_CVodeError,
                        verbosity_CVodeError=_verbosity_CVodeError,
                    )
                )
                # Create the archipelago for the current problem
                _archi = ArchipelagoHelpers.create_archipelago(
                    unknowns=_unknowns,
                    optimizers=optimizers,
                    optimizers_kwargs=optimizers_kwargs,
                    pg_problem=_pg_problem,
                    rel_pop_size=rel_pop_size,
                    archipelago_kwargs=archipelago_kwargs,
                    log_each_nth_gen=None,
                    report_level=0,
                )
                # Attach bookkeeping attributes dynamically to the archipelago object
                _archi.mc_info = f'MC sample #{mc_count+j}'
                _archi.finished = False
                _archi.problem = loss_calculator
                _archi.wait_check()
                if report_level >= 5:
                    print(f'{_archi.mc_info}: created archipelago with {len(_archi)} islands')
                active_archis.append(_archi)
            # Evolve the archipelagos in the current batch
            # NOTE(review): this outer loop over j looks redundant — the inner
            # evolution loop already iterates all archis and both loops break
            # once every archi is finished; confirm before simplifying.
            for j in range(len(active_archis)):
                for evo in range(1, evolutions+1):
                    # Start an async evolution for all non-converged archis
                    for _archi in active_archis:
                        if not _archi.finished:
                            if report_level >= 6:
                                print(f'\t{_archi.mc_info}: running evolution {evo}')
                            _archi.evolve()
                    # Wait for all archis to finish
                    for _archi in active_archis:
                        if not _archi.finished:
                            _archi.wait_check()
                    # Check the archis for results
                    for _archi in active_archis:
                        # Calculate convergence criterion from the spread of the island champions' losses
                        _losses = numpy.array(_archi.get_champions_f()).flatten()
                        _stop_criterion = atol_islands + rtol_islands * numpy.abs(numpy.mean(_losses))
                        _abs_std = numpy.std(_losses, ddof=1)
                        # Check for convergence for the non-finished archi
                        if _abs_std < _stop_criterion and not _archi.finished:
                            _best_estimates = ArchipelagoHelpers.estimates_from_archipelago(_archi)
                            _best_estimates['convergence'] = True
                            _best_estimates['max_evos'] = False
                            _best_estimates['archi'] = f'{_archi.mc_info}'
                            _best_estimates['losses'] = _losses
                            _archi.finished = True
                            _mc_estimates.append(_best_estimates)
                            if report_level >= 4:
                                print(f'{_archi.mc_info}: convergence')
                        # Check for max evolutions for the non-finished archi
                        elif evo == evolutions and not _archi.finished:
                            _best_estimates = ArchipelagoHelpers.estimates_from_archipelago(_archi)
                            _best_estimates['convergence'] = False
                            _best_estimates['max_evos'] = True
                            _best_estimates['archi'] = f'{_archi.mc_info}'
                            _best_estimates['losses'] = _losses
                            _archi.finished = True
                            _mc_estimates.append(_best_estimates)
                            if report_level >= 4:
                                print(f'{_archi.mc_info}: no convergence after max. evolutions ({evo}).')
                    # All archis of this batch finished: free pygmo objects eagerly and leave the evolution loop
                    if all([_archi.finished for _archi in active_archis]):
                        del active_archis
                        gc.collect()
                        active_archis = []
                        break
                # Same check again, to leave the enclosing loop as well (all([]) is True after cleanup above)
                if all([_archi.finished for _archi in active_archis]):
                    del active_archis
                    gc.collect()
                    active_archis = []
                    break
            if report_level >= 3:
                print(f'Runtime for batch {_batch_no} was {(time.time() - _batch_start)/60:.2f} min.')
            # All requested MC samples were run
            # NOTE(review): mc_count is never incremented beyond i (<= mc_samples by
            # construction of the range above), so this break appears unreachable — confirm.
            if mc_count > mc_samples:
                break
        # Comprehend results: collect per-sample records into column-wise lists
        aug_unknowns = [*_unknowns, 'convergence', 'max_evos', 'archi', 'losses']
        estimates_info = {str(_p) : [] for _p in aug_unknowns}
        for _mc_estimate in _mc_estimates:
            for _p in _mc_estimate:
                estimates_info[_p].append(_mc_estimate[_p])
        estimates = {
            str(_p) : numpy.array(numpy.array(estimates_info[_p])[estimates_info['convergence']]) # include only samples that converged
            for _p in _unknowns
        }
        if report_level >= 1:
            _end = time.time()
            # Runtime was > 1 h
            if (_end-_start)/3600 > 1:
                print(f'\n-----------------------------------------------\nTotal runtime was {(_end-_start)/3600:.2f} h.')
            else:
                print(f'\n-----------------------------------------------\nTotal runtime was {(_end-_start)/60:.2f} min.')
            print(f'Convergence ratio was {numpy.sum(estimates_info["convergence"])/len(estimates_info["convergence"])*100:.1f} %.')
            if report_level >= 3:
                print('\nSummaries for empirical parameter distributions\n-----------------------------------------------')
                print(pandas.DataFrame(estimates).describe().T)
        return estimates
    def estimate_repeatedly(self,
                            unknowns:list, measurements:List[Measurement], bounds:List[tuple], metric:str='negLL',
                            jobs:int=10, rel_jobs:float=None, report_level:int=0, reset_afterwards:bool=False, handle_CVodeError:bool=True,
                            ) -> Tuple[dict, list]:
        """
        Runs several global estimations for the requested unknowns. Resulting distributions for the estimated parameters
        can be inspected for measures of dispersion or correlations among parameters. In case a rather high number of estimation jobs is run,
        the resulting distributions can be nicely investigated using the Visualization.show_parameter_distributions method.

        NOTE: This method puts considerable computational load on your machine.

        Arguments
        ---------
        unknowns : list or dict
            The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
            In case a dict is provided, the corresponding values are ignored.
        measurements : List[Measurement]
            The data, from which the repeated estimation is performed.
            Can provide a Measurement object for any model state or observation.
        bounds : List[Tuple]
            Bounds for each unknown to be estimated.

        Keyword arguments
        -----------------
        metric : str
            The metric according to which the loss to be minimized is calculated.
            Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
            Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
        jobs : int
            The number of estimation jobs that are requested.
            Default is 10.
        rel_jobs : float
            Number of estimation jobs, relative to the number of unknowns: rel_jobs * number of unknowns.
            Overrides jobs argument. Default is None, which implies use of `jobs`.
        report_level : int
            Enables informative output about the estimation process.
            Default is 0, which is no output.
            1 = prints a summary of the empirical parameter distributions and basic information about the parallelization.
            2 = reports additionally about each parallel estimation job.
        reset_afterwards : bool
            To reset the Caretaker object after the estimation has finished.
            Default is False.
            NOTE(review): this flag is accepted but not referenced anywhere in this
            method body — confirm whether the reset is handled downstream.
        handle_CVodeError : bool
            Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
            Default is True.

        Returns
        -------
        repeated_estimates : dict
            The values from repeated estimation for the requested unknowns.
        results : list
            Contains estimation_info dicts for each estimation job.

        Raises
        ------
        TypeError
            A list containing not only Measurement objects is provided.

        Warns
        -----
        UserWarning
            Property `optimizer_kwargs` of this Caretaker instance has key `disp`.
        """
        # This method is on its way out; signal that to callers without breaking them.
        warnings.warn(
            'This method will be deprecated in future releases of pyFOOMB.',
            PendingDeprecationWarning
        )
        for _item in measurements:
            if not isinstance(_item, Measurement):
                raise TypeError('Must provide a list of Measurement objects')
        # Only the keys of `unknowns` matter here; any provided values are discarded.
        if isinstance(unknowns, list):
            unknowns = {unknown : None for unknown in unknowns}
        # rel_jobs overrides jobs (len(bounds) equals the number of unknowns)
        if rel_jobs is not None:
            jobs = int(numpy.ceil(rel_jobs * len(bounds)))
        if self.optimizer_kwargs is not None:
            if 'disp' in self.optimizer_kwargs.keys():
                warnings.warn(
                    'Reporting progress for each single optimization job is deactivated for parallel multi-estimation methods',
                    UserWarning,
                )
        # collect arg instances for each parallel job; `disp` is forced off per the warning above
        arg_instances = [
            {
                'unknowns' : unknowns,
                'measurements' : measurements,
                'bounds' : bounds,
                'metric' : metric,
                'use_global_optimizer' : True,
                'handle_CVodeError' : handle_CVodeError,
                'optimizer_kwargs' : {'disp' : False},
            }
            for job in range(jobs)
        ]
        # Map report_level to the verbosity level of the parallelization backend
        parallel_verbosity = 0
        if report_level >= 1:
            parallel_verbosity = 1
        if report_level >= 2:
            parallel_verbosity = 11
        # do the jobs
        repeated_estimates, results = self._estimate_parallelized_helper(arg_instances, unknowns, parallel_verbosity)
        if report_level >= 1:
            print('\nSummaries for empirical parameter distributions\n-----------------------------------------------')
            print(pandas.DataFrame(repeated_estimates).describe().T)
        if report_level >= 2:
            _runtimes_min = [result[1]['runtime_min'] for result in results]
            print(f'\nAverage runtime per estimation job was {numpy.mean(_runtimes_min):.2f} +/- {numpy.std(_runtimes_min, ddof=1):.2f} min')
        return repeated_estimates, results
def estimate_MC_sampling(self,
                         unknowns:list, measurements:List[Measurement], bounds:List[tuple]=None,
                         metric:str='negLL', reuse_errors_as_weights:bool=True,
                         mc_samples:int=100, rel_mc_samples:float=None,
                         report_level:int=0, reset_afterwards:bool=True, use_global_optimizer:bool=True,
                         handle_CVodeError:bool=True,
                         ) -> Tuple[dict, dict]:
    """
    Performs Monte-Carlo sampling from measurements, and re-estimates parameters. Per default, global optimization is used.
    Resulting bootstrapped distributions for the parameters can be assessed for parameter uncertainties and correlations.

    NOTE: This method puts considerable computational load on your machine.
    NOTE: Deprecated; a PendingDeprecationWarning is always emitted. Use `estimate_parallel_MC_sampling` instead.

    Arguments
    ---------
    unknowns : dict or list
        The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
        Providing a list of valid unknowns causes the use of scipy's differential evolution optimizer.
        To use the local minimizer, unknowns must be of type dict, with initial guesses as values.
    measurements : List[Measurement]
        The measurements from which the parameters are to be estimated.

    Keyword arguments
    -----------------
    bounds : List[tuple]
        List of tuples (lower, upper), one tuple for each parameter. Must be provided when using the global optimizer.
        Default is None.
    metric : str
        The metric according to which the loss to be minimized is calculated.
        Can be one of, e.g. `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
        Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
    reuse_errors_as_weights : bool
        Uses the measurement errors as weights for each set of measurement samples drawn.
        Default is True.
    mc_samples : int
        The number of MC samples that shall be drawn from the measurement data.
        Default is 100.
    rel_mc_samples : float
        Number of MC samples = rel_mc_samples * number of measurement points. Overrides mc_samples. Default is None.
    report_level : int
        Enables informative output about the estimation process.
        Default is 0, which is no output.
        1 = prints a summary of the empirical parameter distributions and basic information about the parallelization.
        2 = reports additionally about each parallel estimation job.
    reset_afterwards : bool
        Default is True.
        NOTE(review): this kwarg is never read in the body; every parallel job is created with
        `reset_afterwards=True` regardless of the value passed here — confirm intent.
    use_global_optimizer : bool
        Use global optimizer instead of local minimizer for repeated parameter estimation.
        Default is True.
    handle_CVodeError : bool
        Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
        Default is True.

    Returns
    -------
    repeated_estimates : dict
        The values from repeated estimation for the requested unknowns.
    results : list
        Contains estimation_info dicts for each estimation job.

    Raises
    ------
    TypeError
        A list containing not only Measurement objects is provided.
    ValueError
        No initial guesses are provided when using the local optimizer.
    ValueError
        No bounds are provided when using the global optimizer.
    AttributeError
        Any measurement has no errors.

    Warns
    -----
    PendingDeprecationWarning
        Always, since this method is scheduled for removal.
    UserWarning
        Property `optimizer_kwargs` of this Caretaker instance has key `disp`.
    """
    warnings.warn(
        'This method will be deprecated in future releases of pyFOOMB. Use method `estimate_parallel_MC_sampling` instead',
        PendingDeprecationWarning
    )
    for _item in measurements:
        if not isinstance(_item, Measurement):
            raise TypeError('Must provide a list of Measurement objects')
    # Error handling
    if not use_global_optimizer and not isinstance(unknowns, dict):
        raise ValueError('Must provide initial guesses when using the local optimizer')
    if use_global_optimizer and bounds is None:
        raise ValueError('Must provide bounds to use the global optimizer')
    if not Helpers.all_measurements_have_errors(measurements):
        raise AttributeError('Measurements cannot have no errors')
    if isinstance(unknowns, list):
        # normalize to dict form; values stay None because the global optimizer needs no initial guesses
        unknowns = {unknown : None for unknown in unknowns}
    if self.optimizer_kwargs is not None:
        if 'disp' in self.optimizer_kwargs.keys():
            warnings.warn(
                'Reporting progress for each single optimization job is deactivated for parallel multi-estimation methods',
                UserWarning,
            )
    # calculate number of estimation jobs; rel_mc_samples overrides mc_samples
    if rel_mc_samples is not None:
        n_meas = numpy.sum([len(measurement.timepoints) for measurement in measurements])
        mc_samples = int(numpy.ceil(n_meas * rel_mc_samples))
    # collect arg instances for parallel jobs; each job gets its own random resampling of the measurements
    arg_instances = []
    for i in range(mc_samples):
        _rnd_measurements = self._draw_measurement_samples(measurements, reuse_errors_as_weights)
        arg_instances.append(
            {
                'unknowns' : unknowns,
                'measurements' : _rnd_measurements,
                'bounds' : bounds,
                'metric' : metric,
                'handle_CVodeError' : handle_CVodeError,
                'use_global_optimizer' : use_global_optimizer,
                'reset_afterwards' : True,
                'optimizer_kwargs' : {'disp' : False},
            }
        )
    # map report_level onto joblib's verbosity scale
    parallel_verbosity = 0
    if report_level >= 1:
        parallel_verbosity = 1
    if report_level >= 2:
        parallel_verbosity = 11
    # do the jobs
    repeated_estimates, results = self._estimate_parallelized_helper(arg_instances, unknowns, parallel_verbosity)
    if report_level >= 1:
        print('\nSummaries for empirical parameter distributions\n-----------------------------------------------')
        print(pandas.DataFrame(repeated_estimates).describe().T)
    if report_level >= 2:
        _runtimes_min = [result[1]['runtime_min'] for result in results]
        print(f'\nAverage runtime per estimation job was {numpy.mean(_runtimes_min):.2f} +/- {numpy.std(_runtimes_min, ddof=1):.2f} min')
    return repeated_estimates, results
def get_sensitivities(self,
                      measurements:List[Measurement]=None, responses:list='all', parameters:list=None,
                      tfinal:float=None, abs_h:float=None, rel_h:float=1e-3,
                      handle_CVodeError:bool=True, verbosity_CVodeError:bool=False,
                      ) -> List[Sensitivity]:
    """
    Approximates sensitivities of model responses w.r.t. parameters using the central difference quotient:
    f'(x) = (f(x+h) - f(x-h)) / (2*h).
    Indicates how a model response (i.e., a state or observation) changes dynamically in time
    with a small change in a certain parameter (i.e., a model parameter, initial value, or observation parameter).

    Keyword arguments
    -----------------
    measurements : List[Measurement]
        Can provide a Measurement object for any model state or observation.
        Default is None, which implies that `tfinal` cannot be None.
    responses : list
        Specific model responses (state or observable), for which the sensitivities are requested.
        Default is `all`, which causes sensitivities for all model responses.
    parameters : list or dict
        The parameters for which the sensitivities are requested.
        In case a dict is provided, the corresponding values will be set.
        In case a list is provided, the corresponding values for the current mapping will be used.
        Default is None, which uses all currently known parameters and their values.
    tfinal : float
        The final integration time.
        Default is None, which implies that `measurements` cannot be None.
    abs_h : float
        Absolute perturbation for central difference quotient. `rel_h` must be set to None for `abs_h` to take effect.
        Default is None.
    rel_h : float
        Relative pertubation for central difference quotient. Overrides use of abs_h.
        Absolute perturbation for each parametric sensitivity is then calculated according to: abs_h = rel_h * max(1, |p|).
        Default is 1e-3.
    handle_CVodeError : bool
        Default is True.
        NOTE(review): this kwarg is not referenced in the body and is not forwarded to
        `_get_sensitivities_parallel` — confirm whether it should take effect here.
    verbosity_CVodeError : bool
        Default is False.
        NOTE(review): like `handle_CVodeError`, this kwarg is currently unused in the body.

    Returns
    -------
    sensitivities : List[Sensitivity]
        One Sensitivity time series per (replicate, response, parameter) combination.

    Raises
    ------
    TypeError
        Wrong type for kwarg `responses`.
    ValueError
        Non-unique (case-insensitive) responses given.
    ValueError
        Non-unique (case-insensitive) parameters given.
    ValueError
        Given parameters are not known according to the current parameter mapping.
    ValueError
        Neither measurements nor tfinal is provided.
    TypeError
        A list containing not only Measurement objects is provided.
    """
    if not isinstance(responses, list) and responses != 'all':
        raise TypeError('Responses must be either of type list or `all`')
    if responses != 'all':
        if not Helpers.has_unique_ids(responses):
            raise ValueError(Messages.non_unique_ids)
    if measurements is not None:
        for _item in measurements:
            if not isinstance(_item, Measurement):
                raise TypeError(f'Must provide a list of Measurement objects: {_item} in {measurements}')
    # timepoints for integration: union of all measurement timepoints, optionally extended by tfinal
    t = numpy.array([])
    if measurements is not None:
        t = numpy.append(t, Helpers.get_unique_timepoints(measurements))
    if tfinal is not None:
        tfinal = numpy.array(tfinal)
        if t.size == 0:
            t = numpy.append(t, tfinal)
        elif tfinal > max(t):
            # only extend beyond the latest measured timepoint
            t = numpy.append(t, tfinal)
    if t.size == 0:
        raise ValueError('Must provide either measurements or tfinal')
    if t.size == 1:
        # a single timepoint cannot span an integration grid; take the solver's own grid instead
        _simulations = self.simulate(t=t, verbosity=50)
        t = Helpers.get_unique_timepoints(_simulations)
    # set parameters if provided
    _parameter_names = self._get_valid_parameter_names()
    if parameters is not None:
        if not Helpers.has_unique_ids(parameters):
            raise ValueError(Messages.non_unique_ids)
        if not set(parameters).issubset(set(_parameter_names)):
            raise ValueError(f'Invalid parameters: {set(parameters).difference(set(_parameter_names))}. Valid parameters are: {_parameter_names}.')
        if isinstance(parameters, dict):
            self.set_parameters(parameters)
        elif isinstance(parameters, list):
            # resolve the current values for the named parameters
            _parameters = self._get_all_parameters()
            parameters = {p : _parameters[p] for p in parameters}
    else:
        parameters = self._get_all_parameters()
    sensitivities = []
    for _id in self.replicate_ids:
        sensitivities.extend(self._get_sensitivities_parallel(_id, parameters, rel_h, abs_h, t, responses))
    return sensitivities
def get_information_matrix(self,
                           measurements:List[Measurement], estimates:dict,
                           sensitivities:List[Sensitivity]=None, handle_CVodeError:bool=True, verbosity_CVodeError:bool=False,
                           ) -> numpy.ndarray:
    """
    Constructs the Fisher information matrix (FIM) by calculating a FIM at each distinct timepoint
    where at least one measurement was made, and summing these time-varying FIMs.
    FIM(t) are built using sensitivities, which are approximated using the central difference quotient method.
    FIM is of shape (n_estimated_parameters, n_estimated_parameters), with parameters sorted
    alphabetically (case-insensitive). A non-invertible FIM indicates that parameter(s) cannot be
    identified from the given measurements.

    Arguments
    ---------
    measurements : List[Measurement]
        Can provide a Measurement object for any model state or observation.
    estimates : dict
        The parameters (model parameters, initial values, observation parameters) that have been estimated previously.

    Keyword arguments
    -----------------
    sensitivities : List[Sensitivity]
        These may have been calculated previously using the method `get_sensitivities`.
        Default is None, which causes calculation of sensitivities.
    handle_CVodeError : bool
        Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
        Default is True.
    verbosity_CVodeError : bool
        Enables informative output during handling CVodeErrors. Default is False.

    Returns
    -------
    FIM : numpy.ndarray
        Fisher information matrix of shape (n_estimated_parameters, n_estimated_parameters),
        with rows and cols corresponding to the parameters (sorted alphabetically case-insensitive).

    Raises
    ------
    TypeError
        A list containing not only Measurement objects is provided.
    TypeError
        A list containing not only Sensitivity objects is provided.
    """
    for _item in measurements:
        if not isinstance(_item, Measurement):
            raise TypeError('Must provide a list of Measurement objects')
    if sensitivities is None:
        sensitivities = self.get_sensitivities(
            measurements=measurements,
            parameters=estimates,
            handle_CVodeError=handle_CVodeError,
            # BUGFIX: verbosity_CVodeError was accepted and documented but never forwarded
            verbosity_CVodeError=verbosity_CVodeError,
        )
    else:
        for _item in sensitivities:
            if not isinstance(_item, Sensitivity):
                raise TypeError('Must provide a list of Sensitivity objects')
    all_t = Helpers.get_unique_timepoints(measurements)
    # keep the per-replicate, per-timepoint FIMs grouped while accumulating the total
    FIMs = {}
    # float fill value so the accumulator does not start as an integer array
    FIM = numpy.full(shape=(len(estimates), len(estimates)), fill_value=0.0)
    for _id in self.replicate_ids:
        FIMs[_id] = []
        for _t in all_t:
            _FIM_t = self._get_information_matrix_at_t(t=_t, measurements=measurements, estimates=estimates, sensitivities=sensitivities, replicate_id=_id)
            FIMs[_id].append(_FIM_t)
            FIM = FIM + _FIM_t
    return FIM
def get_parameter_uncertainties(self,
                                estimates:dict,
                                measurements:List[Measurement],
                                sensitivities:List[Sensitivity]=None,
                                report_level:int=0,
                                handle_CVodeError:bool=True,
                                verbosity_CVodeError:bool=True,
                                ) -> dict:
    """
    Calculates uncertainties for estimated parameters, based on the variance-covariance matrix
    derived from the sensitivity-based Fisher information matrix.

    NOTE: The parameter variance-covariance matrix represents a symmetric, linear approximation
    to the parameter (co)-variances. Other methods such as Monte-Carlo sampling can discover
    non-linear correlations, but require significant computational load.

    Arguments
    ---------
    estimates : dict
        Dictionary holding the previously estimated parameter values.
    measurements : List[Measurement]
        The measurements from which the parameters have been estimated.

    Keyword arguments
    -----------------
    sensitivities : List[Sensitivity]
        These may have been calculated previously using the method `get_sensitivities`.
        Default is None, which causes calculation of sensitivities.
    report_level : int
        Controls depth of informative output, default is 0 which is no output.
    handle_CVodeError : bool
        Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
        Default is True.
    verbosity_CVodeError : bool
        Enables informative output during handling CVodeErrors.
        Default is True.
        NOTE(review): sibling methods default this kwarg to False — confirm the True default here is intended.

    Returns
    -------
    parameter_information : dict
        A dictionary summarizing the parameters, their values and standard errors
        (keys `Parameters`, `Values`, `StdErrs`; parameters sorted case-insensitively).

    Raises
    ------
    TypeError
        A list containing not only Measurement objects is provided.
    TypeError
        A list containing not only Sensitivity objects is provided.
    """
    for _item in measurements:
        if not isinstance(_item, Measurement):
            raise TypeError('Must provide a list of Measurement objects')
    if sensitivities is None:
        sensitivities = self.get_sensitivities(
            measurements=measurements,
            parameters=estimates,
            handle_CVodeError=handle_CVodeError,
            verbosity_CVodeError=verbosity_CVodeError,
        )
    else:
        for _item in sensitivities:
            if not isinstance(_item, Sensitivity):
                raise TypeError('Must provide a list of Sensitivity objects')
    matrices = self.get_parameter_matrices(measurements=measurements, estimates=estimates, sensitivities=sensitivities)
    # standard errors are the square roots of the covariance matrix diagonal
    std_errs = numpy.sqrt(numpy.diag(matrices['Cov']))
    if report_level>=1:
        print('\nEstimated parameters:\n----------')
        for _p, _err in zip(sorted(estimates.keys(), key=str.lower), std_errs):
            print(f'{_p}: {estimates[_p]:.2e} +/- {_err:.2e} ({abs(_err/estimates[_p]*100):.2f} %)')
    parameter_information = {}
    parameter_information['Parameters'] = sorted(estimates.keys(), key=str.lower)
    parameter_information['Values'] = numpy.array([estimates[_p] for _p in sorted(estimates.keys(), key=str.lower)])
    parameter_information['StdErrs'] = std_errs
    return parameter_information
def get_optimality_criteria(self, Cov:numpy.ndarray, report_level:int=0) -> dict:
    """
    Derive scalar (single-value) optimality criteria from a parameter variance-covariance matrix.

    Arguments
    ---------
    Cov : numpy.ndarray
        The parameter covariance matrix for the estimated parameters.

    Keyword arguments
    -----------------
    report_level : int
        Controls informative output on optimality criteria.
        Default is 0, which is no print output.

    Returns
    -------
    opt_criteria : dict
        The calculated optimality criteria, keyed by criterion name.
    """
    cov_evaluator = CovOptimality()
    opt_criteria = {}
    for criterion in ('A', 'D', 'E', 'E_mod'):
        opt_criteria[criterion] = cov_evaluator.get_value(criterion, Cov)
    if report_level >= 1:
        print('\nOptimality criteria:\n----------')
        for criterion in opt_criteria:
            print(f'{criterion}: {opt_criteria[criterion]:.2e}')
    return opt_criteria
def get_parameter_matrices(self,
                           estimates:dict,
                           measurements:List[Measurement],
                           sensitivities:List[Sensitivity]=None,
                           handle_CVodeError:bool=True,
                           ) -> Dict[str, numpy.ndarray]:
    """
    Calculate the Fisher information matrix FIM, together with the derived
    variance-covariance matrix Cov and correlation matrix Corr.

    Arguments
    ---------
    estimates : dict
        Dictionary holding the previously estimated parameter values.
    measurements : List[Measurement]
        The measurements from which the parameters have been estimated.

    Keyword arguments
    -----------------
    sensitivities : List[Sensitivity]
        These may have been calculated previously using the method `get_sensitivities`.
        Default is None, which causes calculation of sensitivities.
    handle_CVodeError : bool
        Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
        Default is True.

    Returns
    -------
    matrices : Dict[str, numpy.ndarray]
        The parameter matrices under keys `FIM`, `Cov` and `Corr`.

    Raises
    ------
    TypeError
        A list containing not only Measurement objects is provided.
    TypeError
        A list containing not only Sensitivity objects is provided.
    """
    for _entry in measurements:
        if not isinstance(_entry, Measurement):
            raise TypeError('Must provide a list of Measurement objects')
    if sensitivities is not None:
        for _entry in sensitivities:
            if not isinstance(_entry, Sensitivity):
                raise TypeError('Must provide a list of Sensitivity objects')
    FIM = self.get_information_matrix(measurements=measurements, estimates=estimates, sensitivities=sensitivities, handle_CVodeError=handle_CVodeError)
    try:
        Cov = numpy.linalg.inv(FIM)
    except LinAlgError:
        # a singular FIM means the parameters are not identifiable from these measurements
        warnings.warn('Information matrix not invertible.', UserWarning)
        Cov = numpy.full(shape=FIM.shape, fill_value=numpy.inf)
    return {
        'FIM' : FIM,
        'Cov' : Cov,
        'Corr' : Calculations.cov_into_corr(Cov),
    }
def set_integrator_kwargs(self, integrator_kwargs:dict):
    """
    Set options for the used CVode integrator, propagated to all internally
    handled ExtendedSimulator instances.

    Typical options are `atol` or `rtol`. For all options, see https://jmodelica.org/assimulo/ODE_CVode.html.
    Note that not all options may have an effect due to the use of the integrator in this package.

    Arguments
    ---------
    integrator_kwargs : dict
        The CVode integrator options to be set.
    """
    # assign the same options dict to every replicate's simulator
    for _replicate_id in self.replicate_ids:
        self.simulators[_replicate_id].integrator_kwargs = integrator_kwargs
def set_parameters(self, parameters:dict):
    """
    Assigns specific values to parameters, according to the current parameter mapping.

    Arguments
    ---------
    parameters : dict
        Key-value pairs for parameters that are to be set.
    """
    # update the central parameter store, then push the new values into all simulators
    self._parameter_manager.set_parameter_values(parameters)
    self._propagate_parameters_through_simulators()
def reset(self):
    """
    Resets the Caretaker object to its state after instantiation, preserving the current replicates.

    Implemented by re-running `__init__` with the originally supplied constructor arguments,
    which are kept in (name-mangled) private attributes.
    """
    self.__init__(
        bioprocess_model_class=self.__bioprocess_model_class,
        model_parameters=self.__model_parameters,
        states=self.__states,
        initial_values=self.__initial_values,
        replicate_ids=self.__replicate_ids,
        initial_switches=self.__initial_switches,
        model_name=self.__model_name,
        observation_functions_parameters=self.__observation_functions_parameters,
    )
def apply_mappings(self, mappings:list):
    """
    Applies a list of parameter mappings among the replicates.

    An item of the mappings list must either be a tuple with the structure
    (replicate_id, global_name, local_name, value) or a ParameterMapper instance according to
    ParameterMapper(replicate_id=..., global_name=..., local_name=..., value=...).
    NOTE: replicate_id can also be a list, which applies the mapping to all replicates in this list.
    NOTE: replicate_id can also be `all`, which applies the mapping to all replicates.

    Arguments
    ---------
    mappings : list of ParameterMapper and/or tuple
        A list of mappings, which can be tuples or ParameterMapper objects, or a mix of them.
        Example: [(replicate_id, global_name, local_name, value), ]
        Example: [ParameterMapper(replicate_id=..., global_name=..., local_name=..., value=...), ]

    Raises
    ------
    TypeError
        Any mapping is not a tuple or ParameterMapper object.
    ValueError
        Any mapping has an invalid replicate id.
    ValueError
        Any mapping has an invalid global parameter name.

    Warns
    -----
    UserWarning
        This method is called from implicitly defined single replicate Caretaker objects.
    """
    # guard clause: mappings make no sense for the implicit single-replicate case
    if len(self.replicate_ids) == 1 and self.replicate_ids[0] == SINGLE_ID:
        warnings.warn(
            'Parameter mappings cannot be applied for single replicate Caretaker objects. Use `set_parameters()` method instead.',
            UserWarning
        )
        return
    self._parameter_manager.apply_mappings(mappings)
    self._propagate_parameters_through_simulators()
#%% Private methods
def _get_sensitivities_parallel(self, replicate_id:str, parameters:list, rel_h:float, abs_h:float, t:numpy.ndarray, responses:list) -> List[Sensitivity]:
    """
    Calculates sensitivities in parallel for a specific replicate_id.

    Arguments
    ---------
    replicate_id : str
        The replicate_id for which the sensitivities are requested.
    parameters : list
        The model parameters, initial values and/or observation parameters for which the sensitivities are requested.
    rel_h : float
        The central difference perturbation value, relative to each parameter.
    abs_h : float
        The absolute central difference perturbation value.
    t : numpy.ndarray
        The timepoints at which the sensitivities are to be calculated.
    responses : list
        The model states and/or observations for which the sensitivities are to be calculated,
        or the string `all`.

    Returns
    -------
    List[Sensitivity]
        One Sensitivity per (response, parameter) combination.

    Raises
    ------
    ValueError
        Sensitivities are to be calculated for unknown model responses.
    """
    allowed_responses = self.simulators[replicate_id]._get_allowed_measurement_keys()
    if responses == 'all':
        _responses = allowed_responses
    elif not set(responses).issubset(set(allowed_responses)):
        raise ValueError(
            f'Invalid model responses for sensitivity calculation. {set(responses).difference(set(allowed_responses))}'
        )
    else:
        _responses = responses
    # one job per (response, parameter) pair, packed as a single tuple for the worker
    arg_instances = [
        (replicate_id, _response, _parameter, rel_h, abs_h, parameters, t)
        for _response in _responses
        for _parameter in parameters
    ]
    # never request more workers than there are jobs
    n_jobs = min(len(arg_instances), joblib.cpu_count())
    with joblib.parallel_backend('loky', n_jobs=n_jobs):
        sensitivities = joblib.Parallel(verbose=0)(map(joblib.delayed(self._d_response_i_wrt_d_parameter_j_central_difference), arg_instances))
    return list(sensitivities)
def _d_response_i_wrt_d_parameter_j_central_difference(self, arg) -> Sensitivity:
    """
    Helper method for parallelization of sensitivity calculation.

    Approximates d(response_i)/d(parameter_j) over the timepoints `t` with a central
    difference quotient: f'(p) ~ (f(p+h) - f(p-h)) / (2*h).

    Arguments
    ---------
    arg : tuple
        (replicate_id, response_i, parameter_j, rel_h, abs_h, parameters, t), packed into a
        single tuple so the method can be dispatched through joblib workers.

    Returns
    -------
    Sensitivity
        Time series of the approximated derivative for (response_i, parameter_j).
    """
    replicate_id, response_i, parameter_j, rel_h, abs_h, parameters, t = arg
    # set parameter perturbation
    if rel_h is not None:
        # relative perturbation overrides abs_h: h = rel_h * max(1, |p|)
        abs_h = rel_h * max([1, abs(parameters[parameter_j])])
    if abs_h*1.1 <= EPS64:
        warnings.warn(f'Parameter perturbation for finite differences is in the same order of machine precision. {abs_h} vs. {EPS64}', UserWarning)
    # forward simulations with parameter forward perturbation
    # NOTE: set_parameters mutates shared simulator state, so the two runs must stay sequential
    _pars_plus = copy.deepcopy(parameters)
    _pars_plus[parameter_j] = _pars_plus[parameter_j] + abs_h
    self.set_parameters(_pars_plus)
    simulation_plus = self.simulators[replicate_id].simulate(t=t, verbosity=50)
    simulation_plus_resp_i = Helpers.extract_time_series(simulation_plus, name=response_i, replicate_id=replicate_id)
    y_plus = simulation_plus_resp_i.values
    # forward simulations with parameter backward perturbation
    _pars_minus = copy.deepcopy(parameters)
    _pars_minus[parameter_j] = _pars_minus[parameter_j] - abs_h
    self.set_parameters(_pars_minus)
    simulation_minus = self.simulators[replicate_id].simulate(t=t, verbosity=50)
    simulation_minus_resp_i = Helpers.extract_time_series(simulation_minus, name=response_i, replicate_id=replicate_id)
    y_minus = simulation_minus_resp_i.values
    # approx fprime
    dyi_dthetai = (y_plus - y_minus) / (2 * abs_h)
    timepoints = simulation_plus_resp_i.timepoints
    return Sensitivity(timepoints=timepoints, values=dyi_dthetai, response=response_i, parameter=parameter_j, h=abs_h, replicate_id=replicate_id)
def _draw_measurement_samples(self, measurements:List[Measurement], reuse_errors_as_weights:bool=True) -> List[Measurement]:
    """
    Helper method for `estimate_MC_sampling` method.

    Creates a deep copy of `measurements` whose `values` are replaced by random samples
    drawn via each measurement's `_get_random_samples_values()`.

    Arguments
    ---------
    measurements : List[Measurement]
        The measurements from which the parameters have been estimated.
        Assumes that this argument was run through method `utils.Helpers.check_kinetic_data_dict()`.

    Keyword arguments
    -----------------
    reuse_errors_as_weights : bool
        Uses the measurement errors as weights for each set of measurement samples drawn.
        When False, the copies' `errors` are replaced by a vector of ones.
        Default is True.

    Returns
    -------
    rnd_measurements : List[Measurement]
        A deep copy of `measurements` with randomly resampled `values`.
    """
    rnd_measurements = copy.deepcopy(measurements)
    for original, clone in zip(measurements, rnd_measurements):
        samples = original._get_random_samples_values()
        clone.values = samples
        if not reuse_errors_as_weights:
            clone.errors = numpy.ones_like(samples)
    return rnd_measurements
def _estimate_parallelized_helper(self, arg_instances, unknowns, parallel_verbosity):
    """
    Helper method for `estimate_MC_sampling` and `estimate_repeatedly` methods.

    Fans the prepared estimation jobs out over a loky worker pool and collects,
    per unknown, the list of estimated values across all jobs.

    Returns
    -------
    tuple
        (repeated_estimates, results): per-parameter estimate lists, and the raw
        per-job results.
    """
    # never request more workers than there are jobs
    n_jobs = min(len(arg_instances), joblib.cpu_count())
    with joblib.parallel_backend('loky', n_jobs=n_jobs):
        results = joblib.Parallel(verbose=parallel_verbosity)(map(joblib.delayed(self._parallel_estimate_wrapper), arg_instances))
    # regroup the per-job estimates by parameter name
    repeated_estimates = {
        p : [result[0][p] for result in results]
        for p in unknowns.keys()
    }
    return repeated_estimates, results
def _get_all_parameters(self) -> dict:
"""
Get all currently specified local parameters and their values.
"""
parameters = {}
for _p in self._parameter_manager._parameters:
parameters[_p.local_name] = _p.value
return {_p : parameters[_p] for _p in sorted(parameters.keys(), key=str.lower)}
def _get_information_matrix_at_t(self, t:float, measurements:List[Measurement], estimates:dict, sensitivities:List[Sensitivity], replicate_id:str) -> numpy.ndarray:
"""
Calculates Fisher information matrix a timepoint t.
Arguments
---------
t : float
Timepoint at which the FIM is calculated
measurements : List[Measurement]
The measurements from which the parameters have been estimated.
estimates : dict
The parameters which have been estimated from the measurements.
sensitivities : List[Sensitivity]
Sensitivities that have been calculated from the measurements and estimated parameters.
Returns
-------
FIM_t : numpy.ndarray
Fisher information matrix at timepoint t, has size n_parameters x n_parameters
Raises
------
AttributeError
Measurement objects have no error property set.
"""
if not Helpers.all_measurements_have_errors(measurements):
raise AttributeError('Measurement errors property not set.')
measured_responses = sorted(set([measurement.name for measurement in measurements]), key=str.lower)
estimated_parameters = sorted(estimates.keys(), key=str.lower)
S_t = numpy.full(shape=(len(measured_responses), len(estimated_parameters)), fill_value=numpy.nan)
err = numpy.full(shape=len(measured_responses), fill_value=numpy.nan)
for i, _measured_response in enumerate(measured_responses):
for j, _parameter in enumerate(estimated_parameters):
_sensitivity = Helpers.extract_time_series(
sensitivities,
name=f'd({_measured_response})/d({_parameter})',
replicate_id=replicate_id,
)
_measurement = Helpers.extract_time_series(
measurements,
name=_measured_response,
replicate_id=replicate_id,
)
# only account for this response if it has been measured
if _measurement is not None and t in _measurement.timepoints:
S_t[i, j] = _sensitivity.values[numpy.argwhere(_sensitivity.timepoints==t)]
err[i] = _measurement.errors[numpy.argwhere(_measurement.timepoints==t)]
else:
S_t[i, j] = 0
err[i] = numpy.inf
Sigma_t = numpy.diag( | numpy.square(err) | numpy.square |
import os
import json
import numpy as np
import math
import time
from math import inf
import bpy
from mathutils import Vector
from . import animation
import importlib
importlib.reload(animation)
# convert deg to rad
DEG_TO_RAD = math.pi / 180.0

# direction names for minecraft cube face UVs
DIRECTIONS = np.array([
    "north",
    "east",
    "west",
    "south",
    "up",
    "down",
])

# normals for minecraft directions in BLENDER world space
# (rows presumably in the same order as DIRECTIONS above — verify against consumers)
# e.g. blender (-1, 0, 0) is minecraft north (0, 0, -1)
# shape (f,n,v) = (6,6,3) after the np.tile below:
#   f = 6: number of cuboid faces to test
#   n = 6: number of normal directions
#   v = 3: vector coordinates (x,y,z)
DIRECTION_NORMALS = np.array([
    [-1., 0., 0.],
    [ 0., 1., 0.],
    [ 0., -1., 0.],
    [ 1., 0., 0.],
    [ 0., 0., 1.],
    [ 0., 0., -1.],
])
# replicate the (6,3) table once per face -> (6,6,3)
DIRECTION_NORMALS = np.tile(DIRECTION_NORMALS[np.newaxis,...], (6,1,1))
def index_of(val, in_list):
    """Return the index of `val` in `in_list`, or -1 when absent."""
    if val in in_list:
        return in_list.index(val)
    return -1
def merge_dict_properties(dict_original, d):
    """Merge `d` into `dict_original` in place and return it.

    When a key already maps to a dict in `dict_original`, the incoming value is
    merged into that inner dict; otherwise the key is simply (re)assigned.
    """
    for key, incoming in d.items():
        current = dict_original.get(key)
        if isinstance(current, dict):
            current.update(incoming)
        else:
            dict_original[key] = incoming
    return dict_original
def get_base_path(filepath_parts, curr_branch=None, new_branch=None):
    """Swap one branch of a split filesystem path for another.

    Typical path formats for texture is like:
        "textures": {
            "skin": "entity/wolf/wolf9"
        },
    Matches the base path before `curr_branch`, then builds the base path of the
    other branch. E.g. with curr_branch = "shapes", new_branch = "textures" and
    filepath_parts = ["C:", "vs", "resources", "shapes", "entity", "land", "wolf-male.json"],
    the matched branch point is "shapes" and the new base path is
    ["C:", "vs", "resources"] + ["textures"].

    Returns "" when `curr_branch` does not occur in `filepath_parts`.
    """
    try:
        idx_base_path = filepath_parts.index(curr_branch)
    except ValueError:
        return ""  # branch point not found
    # system agnostic path join
    return os.path.join(os.sep, filepath_parts[0] + os.sep, *filepath_parts[1:idx_base_path], new_branch)
def create_textured_principled_bsdf(mat_name, tex_path):
    """Create a new material named `mat_name` whose Principled BSDF base color
    is driven by the image texture at `tex_path`.

    If the image cannot be loaded, a 16x16 placeholder image is created with its
    filepath pointed at `tex_path`, so the link can be repaired later.
    Returns the newly created material.
    """
    mat = bpy.data.materials.new(mat_name)
    mat.use_nodes = True
    node_tree = mat.node_tree
    nodes = node_tree.nodes
    bsdf = nodes.get("Principled BSDF")
    # add texture node
    if bsdf is not None:
        if "Base Color" in bsdf.inputs:
            tex_input = nodes.new(type="ShaderNodeTexImage")
            tex_input.interpolation = "Closest"  # pixel-art textures must not be blurred
            # load image, if fail make a new image with filepath set to tex path
            # BUGFIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
            # narrow to Exception (bpy raises RuntimeError on load failure)
            try:
                img = bpy.data.images.load(tex_path, check_existing=True)
            except Exception:
                print("FAILED TO LOAD IMAGE:", tex_path)
                img = bpy.data.images.new(os.path.split(tex_path)[-1], width=16, height=16)
                img.filepath = tex_path
            tex_input.image = img
            node_tree.links.new(tex_input.outputs[0], bsdf.inputs["Base Color"])
        # disable shininess
        if "Specular" in bsdf.inputs:
            bsdf.inputs["Specular"].default_value = 0.0
    return mat
def parse_element(
    e,                       # json element describing one cuboid
    parent_cube_origin,      # parent cube's "from" corner, in global space
    parent_rotation_origin,  # = blender origin
    textures,                # map tex_name => bpy material
    tex_width=16.0,          # texture sheet width used to normalize uvs
    tex_height=16.0,         # texture sheet height used to normalize uvs
    import_uvs=True,         # when False, skip writing uv coordinates
):
    """Load a single element into a Blender object.
    Note vintage story cube origins are relative to the parent's
    "from" corner, the origin input is the parent cube's from vertex.
    from to (relative to parent_cube_origin)
    | |
    v v
    | x | child
    |_______|
    |
    | xp <------------parent rotation origin
    |.____ parent (blender origin)
    parent_cube_origin
    .
    (0,0,0)
    New locations in blender space:
    child_blender_origin = parent_cube_origin - parent_rotation_origin + child_rotation_origin
    from_blender_local = from - child_rotation_origin
    to_blender_local = to - child_rotation_origin
    Return tuple of:
    obj, # new Blender object
    local_cube_origin, # local space "to" cube corner origin
    new_cube_origin, # global space cube corner origin
    new_rotation_origin # local space rotation (Blender) origin
    """
    # get cube min/max; VS json (x, y, z) is remapped to Blender axes (z, x, y)
    v_min = np.array([e["from"][2], e["from"][0], e["from"][1]])
    v_max = np.array([e["to"][2], e["to"][0], e["to"][1]])
    # get rotation origin (Blender object location relative to parent)
    location = np.array([
        parent_cube_origin[0] - parent_rotation_origin[0],
        parent_cube_origin[1] - parent_rotation_origin[1],
        parent_cube_origin[2] - parent_rotation_origin[2],
    ])
    if "rotationOrigin" in e: # add rotation origin
        child_rotation_origin = np.array([
            e["rotationOrigin"][2],
            e["rotationOrigin"][0],
            e["rotationOrigin"][1],
        ])
        location = location + child_rotation_origin
    else:
        child_rotation_origin = np.array([0.0, 0.0, 0.0])
    # this cube corner origin (global space), passed down to children
    new_cube_origin = parent_cube_origin + v_min
    new_rotation_origin = parent_rotation_origin + location
    # get euler rotation; VS rotation axes are remapped the same way as positions
    rot_euler = np.array([0.0, 0.0, 0.0])
    if "rotationX" in e:
        rot_euler[1] = e["rotationX"] * DEG_TO_RAD
    if "rotationY" in e:
        rot_euler[2] = e["rotationY"] * DEG_TO_RAD
    if "rotationZ" in e:
        rot_euler[0] = e["rotationZ"] * DEG_TO_RAD
    # create cube
    bpy.ops.mesh.primitive_cube_add(location=location, rotation=rot_euler)
    obj = bpy.context.active_object
    mesh = obj.data
    mesh_materials = {} # tex_name => material_index
    # center local mesh coordinates on the child rotation origin
    v_min = v_min - child_rotation_origin
    v_max = v_max - child_rotation_origin
    # set vertices: overwrite the default unit cube's 8 corners in place
    mesh.vertices[0].co[:] = v_min[0], v_min[1], v_min[2]
    mesh.vertices[1].co[:] = v_min[0], v_min[1], v_max[2]
    mesh.vertices[2].co[:] = v_min[0], v_max[1], v_min[2]
    mesh.vertices[3].co[:] = v_min[0], v_max[1], v_max[2]
    mesh.vertices[4].co[:] = v_max[0], v_min[1], v_min[2]
    mesh.vertices[5].co[:] = v_max[0], v_min[1], v_max[2]
    mesh.vertices[6].co[:] = v_max[0], v_max[1], v_min[2]
    mesh.vertices[7].co[:] = v_max[0], v_max[1], v_max[2]
    # set face uvs; `uv` is the json "faces" mapping (face name => uv spec)
    uv = e.get("faces")
    if uv is not None:
        if import_uvs:
            face_normals = np.zeros((6,1,3))
            for i, face in enumerate(mesh.polygons):
                face_normals[i,0,0:3] = face.normal
            # map face normal -> face name
            # NOTE: this process may not be necessary since new blender
            # objects are created with the same face normal order,
            # so could directly map index -> minecraft face name.
            # keeping this in case the order changes in future
            face_directions = np.argmax(np.sum(face_normals * DIRECTION_NORMALS, axis=2), axis=1)
            face_directions = DIRECTIONS[face_directions]
            # set uvs face order in blender loop, determined experimentally
            uv_layer = mesh.uv_layers.active.data
            for uv_direction, face in zip(face_directions, mesh.polygons):
                face_uv = uv.get(uv_direction)
                if face_uv is not None:
                    if "uv" in face_uv:
                        # unpack uv coords in minecraft coord space [xmin, ymin, xmax, ymax]
                        # transform from minecraft [0, 16] space +x,-y space to blender [0,1] +x,+y
                        face_uv_coords = face_uv["uv"]
                        xmin = face_uv_coords[0] / tex_width
                        ymin = 1.0 - face_uv_coords[3] / tex_height
                        xmax = face_uv_coords[2] / tex_width
                        ymax = 1.0 - face_uv_coords[1] / tex_height
                    else:
                        # no explicit uvs: default to the full texture
                        xmin = 0.0
                        ymin = 1.0
                        xmax = 1.0
                        ymax = 0.0
                    # write uv coords based on rotation; the 4 loop entries
                    # starting at loop_start are this quad's corners
                    k = face.loop_start
                    if "rotation" not in face_uv or face_uv["rotation"] == 0:
                        uv_layer[k].uv[0:2] = xmax, ymin
                        uv_layer[k+1].uv[0:2] = xmax, ymax
                        uv_layer[k+2].uv[0:2] = xmin, ymax
                        uv_layer[k+3].uv[0:2] = xmin, ymin
                    elif face_uv["rotation"] == 90:
                        uv_layer[k].uv[0:2] = xmax, ymax
                        uv_layer[k+1].uv[0:2] = xmin, ymax
                        uv_layer[k+2].uv[0:2] = xmin, ymin
                        uv_layer[k+3].uv[0:2] = xmax, ymin
                    elif face_uv["rotation"] == 180:
                        uv_layer[k].uv[0:2] = xmin, ymax
                        uv_layer[k+1].uv[0:2] = xmin, ymin
                        uv_layer[k+2].uv[0:2] = xmax, ymin
                        uv_layer[k+3].uv[0:2] = xmax, ymax
                    elif face_uv["rotation"] == 270:
                        uv_layer[k].uv[0:2] = xmin, ymin
                        uv_layer[k+1].uv[0:2] = xmax, ymin
                        uv_layer[k+2].uv[0:2] = xmax, ymax
                        uv_layer[k+3].uv[0:2] = xmin, ymax
                    else: # invalid rotation, should never occur... do default
                        uv_layer[k].uv[0:2] = xmax, ymin
                        uv_layer[k+1].uv[0:2] = xmax, ymax
                        uv_layer[k+2].uv[0:2] = xmin, ymax
                        uv_layer[k+3].uv[0:2] = xmin, ymin
                    # assign material, creating the mesh material slot lazily
                    if "texture" in face_uv:
                        tex_name = face_uv["texture"][1:] # remove the "#" in start
                        if tex_name in mesh_materials:
                            face.material_index = mesh_materials[tex_name]
                        elif tex_name in textures: # need new mapping
                            idx = len(obj.data.materials)
                            obj.data.materials.append(textures[tex_name])
                            mesh_materials[tex_name] = idx
                            face.material_index = idx
    # set name (choose whatever is available or "cube" if no name or comment is given)
    obj.name = e.get("name") or "cube"
    return obj, v_min, new_cube_origin, new_rotation_origin
def parse_attachpoint(
    e, # json element
    parent_cube_origin, # cube corner origin of parent
):
    """Load an attachment point associated with a cube and convert it into a
    Blender empty object with the special name:
        "attach_AttachPointName"
    where the suffix is the "code": "AttachPointName" field of the element.
    This naming is what the exporter later uses to recognize attach points.
    Location in the json is relative to the cube origin, not the rotation
    origin. The numeric fields are sometimes serialized as strings, hence
    the float() coercion.
    """
    def _num(key):
        # missing fields and empty strings both fall back to 0.0
        return float(e.get(key) or 0.0)

    # translate VS (x, y, z) into Blender (z, x, y), offset by the cube corner
    location = np.array([
        _num("posZ") + parent_cube_origin[0],
        _num("posX") + parent_cube_origin[1],
        _num("posY") + parent_cube_origin[2],
    ])
    rotation = DEG_TO_RAD * np.array([_num("rotationZ"), _num("rotationX"), _num("rotationY")])
    # create the marker object
    bpy.ops.object.empty_add(type="ARROWS", radius=1.0, location=location, rotation=rotation)
    obj = bpy.context.active_object
    obj.show_in_front = True
    obj.name = "attach_" + (e.get("code") or "attachpoint")
    return obj
def rebuild_hierarchy_with_bones(
    root_objects,  # top-level mesh objects whose hierarchy should become bones
):
    """Wrapper to make armature and replace cubes in hierarchy
    with bones. This is multi step process due to how Blender
    EditBone and PoseBones work.

    Returns the newly created armature object; the meshes end up
    parented to its bones by set_bone_pose_from_object.
    """
    bpy.ops.object.mode_set(mode="OBJECT") # ensure correct starting context
    bpy.ops.object.add(type="ARMATURE", enter_editmode=True)
    armature = bpy.context.active_object
    armature.show_in_front = True
    # pass 1 (edit mode): create one edit bone per mesh object
    for obj in root_objects:
        add_bone_to_armature_from_object(
            obj,
            armature,
            None,
        )
    # switch to pose mode, set bone positions from object transform
    bpy.ops.object.mode_set(mode="POSE")
    for obj in root_objects:
        set_bone_pose_from_object(
            obj,
            armature,
        )
    # set rest pose, not sure if we want this or not...
    bpy.ops.pose.armature_apply()
    bpy.ops.object.mode_set(mode="OBJECT")
    return armature
def add_bone_to_armature_from_object(
    obj,          # mesh object to mirror as a bone
    armature,     # target armature (must be in EDIT mode)
    parent_bone,  # EditBone parent, or None for a root bone
):
    """Recursively create an edit bone named after each mesh object,
    mirroring the object parent hierarchy inside the armature."""
    # skip non mesh (e.g. attach points)
    if not isinstance(obj.data, bpy.types.Mesh):
        return
    bone = armature.data.edit_bones.new(obj.name)
    # this orients bone to blender XYZ
    bone.head = (0., 0., 0.)
    bone.tail = (0., 1., 0.)
    if parent_bone is not None:
        bone.parent = parent_bone
        # keep child bones detached so they can move independently
        bone.use_connect = False
    for child in obj.children:
        add_bone_to_armature_from_object(
            child,
            armature,
            bone,
        )
def set_bone_pose_from_object(
    obj,       # mesh object whose transform seeds the pose bone
    armature,  # armature holding a same-named pose bone (must be in POSE mode)
):
    """Recursively copy each mesh object's transform onto its same-named
    pose bone, then re-parent the object to that bone."""
    # skip non mesh (e.g. attach points)
    if not isinstance(obj.data, bpy.types.Mesh):
        return
    name = obj.name
    pose_bone = armature.pose.bones[name]
    pose_bone.location = (
        obj.location.x,
        obj.location.y,
        obj.location.z,
    )
    pose_bone.rotation_mode = "XYZ"
    pose_bone.rotation_euler = obj.rotation_euler
    # now parent obj to bone
    obj.parent = armature
    obj.parent_type = "BONE"
    obj.parent_bone = name
    # bone parenting attaches the object at the bone tail (0, 1, 0);
    # offset the object back one unit so it stays where it was
    obj.location = (0., -1.0, 0.)
    obj.rotation_euler = (0., 0., 0.)
    for child in obj.children:
        set_bone_pose_from_object(
            child,
            armature,
        )
def parse_animation(
    e, # json element
    armature, # armature to associate action with
    stats, # import stats
):
    """Convert a json animation element into a Blender action on `armature`,
    creating location/rotation fcurves for each animated bone."""
    def add_keyframe_point(fcu, frame, val):
        """Helper to add keyframe point to fcurve"""
        idx = len(fcu.keyframe_points)
        fcu.keyframe_points.add(1)
        fcu.keyframe_points[idx].interpolation = "LINEAR"
        fcu.keyframe_points[idx].co = frame, val
    name = e["code"] # use code as name instead of name field
    action = bpy.data.actions.new(name=name)
    # flag to repeat animation (insert duplicate keyframe at end)
    repeat_animation = False
    # add special marker for onActivityStopped and onAnimationEnd
    num_frames = e.get("quantityframes") or 0
    if "onAnimationEnd" in e:
        marker = action.pose_markers.new(name="onAnimationEnd_{}".format(e["onAnimationEnd"]))
        marker.frame = num_frames - 1
        if e["onAnimationEnd"].lower() != "hold": # death animations hold on finish
            repeat_animation = True
    if "onActivityStopped" in e:
        marker = action.pose_markers.new(name="onActivityStopped_{}".format(e["onActivityStopped"]))
        marker.frame = num_frames + 20
    # load keyframe data
    animation_adapter = animation.AnimationAdapter(action, name=name)
    # insert first keyframe at end to properly loop
    keyframes = e["keyframes"].copy()
    if repeat_animation and len(keyframes) > 0 and num_frames > 0:
        # make copy of frame 0 and insert at num_frames-1
        keyframe_0_copy = {
            "frame": num_frames - 1,
            "elements": keyframes[0]["elements"],
        }
        keyframes.append(keyframe_0_copy)
    for keyframe in keyframes:
        frame = keyframe["frame"]
        for bone, data in keyframe["elements"].items():
            # fcurve data paths address pose bones by name
            fcu_name_prefix = "pose.bones[\"{}\"]".format(bone)
            fcu_name_location = fcu_name_prefix + ".location"
            fcu_name_rotation = fcu_name_prefix + ".rotation_euler"
            # add bone => rotation mode
            animation_adapter.set_bone_rotation_mode(bone, "rotation_euler")
            # position fcurves
            fcu_px = animation_adapter.get(fcu_name_location, 0)
            fcu_py = animation_adapter.get(fcu_name_location, 1)
            fcu_pz = animation_adapter.get(fcu_name_location, 2)
            # euler rotation fcurves
            fcu_rx = animation_adapter.get(fcu_name_rotation, 0)
            fcu_ry = animation_adapter.get(fcu_name_rotation, 1)
            fcu_rz = animation_adapter.get(fcu_name_rotation, 2)
            # add keyframe points (note vintage story ZXY -> XYZ)
            if "offsetX" in data:
                add_keyframe_point(fcu_py, frame, data["offsetX"])
            if "offsetY" in data:
                add_keyframe_point(fcu_pz, frame, data["offsetY"])
            if "offsetZ" in data:
                add_keyframe_point(fcu_px, frame, data["offsetZ"])
            if "rotationX" in data:
                add_keyframe_point(fcu_ry, frame, data["rotationX"] * DEG_TO_RAD)
            if "rotationY" in data:
                add_keyframe_point(fcu_rz, frame, data["rotationY"] * DEG_TO_RAD)
            if "rotationZ" in data:
                add_keyframe_point(fcu_rx, frame, data["rotationZ"] * DEG_TO_RAD)
    # resample animations for blender
    animation_adapter.resample_to_blender()
    # update stats
    stats.animations += 1
def load_element(
    element,          # json cuboid element
    parent,           # parent Blender object, or None for a root element
    cube_origin,      # parent cube's corner origin (global space)
    rotation_origin,  # parent rotation origin (global space)
    all_objects,      # flat list accumulating every created object
    textures,         # map tex_name => bpy material
    tex_width=16.0,
    tex_height=16.0,
    import_uvs=True,
    stats=None,       # optional ImportStats counters
):
    """Recursively load a geometry cuboid.

    Creates the cube for `element` (via parse_element), its attach points,
    and all of its children. Returns the new cube's Blender object.
    """
    obj, local_cube_origin, new_cube_origin, new_rotation_origin = parse_element(
        element,
        cube_origin,
        rotation_origin,
        textures,
        tex_width=tex_width,
        tex_height=tex_height,
        import_uvs=import_uvs,
    )
    all_objects.append(obj)
    # set parent
    if parent is not None:
        obj.parent = parent
    # increment stats (debugging)
    if stats:
        stats.cubes += 1
    # parse attach points (positions are relative to this cube's corner)
    if "attachmentpoints" in element:
        for attachpoint in element["attachmentpoints"]:
            p = parse_attachpoint(
                attachpoint,
                local_cube_origin,
            )
            p.parent = obj
            all_objects.append(p)
            # increment stats (debugging)
            if stats:
                stats.attachpoints += 1
    # recursively load children with this cube's origins as their reference
    if "children" in element:
        for child in element["children"]:
            load_element(
                child,
                obj,
                new_cube_origin,
                new_rotation_origin,
                all_objects,
                textures,
                tex_width,
                tex_height,
                import_uvs,
                stats=stats,
            )
    return obj
class ImportStats():
    """Track statistics on imported data"""
    def __init__(self):
        # every counter starts at zero; the import routines bump them
        # as cubes, attach points, animations and textures are created
        for counter in ("cubes", "attachpoints", "animations", "textures"):
            setattr(self, counter, 0)
def load(context,
filepath,
import_uvs=True, # import face uvs
import_textures=True, # import textures into materials
import_animations=True, # load animations
translate_origin=None, # origin translate either [x, y, z] or None
recenter_to_origin=False, # recenter model to origin, overrides translate origin
debug_stats=True, # print statistics on imported models
**kwargs
):
"""Main import function"""
# debug
t_start = time.process_time()
stats = ImportStats() if debug_stats else None
with open(filepath, "r") as f:
s = f.read()
try:
data = json.loads(s)
except Exception as err:
# sometimes format is in loose json, `name: value` instead of `"name": value`
# this tries to add quotes to keys without double quotes
# this simple regex fails if any strings contain colons
try:
import re
s2 = re.sub("(\w+):", r'"\1":', s)
data = json.loads(s2)
# unhandled issue
except Exception as err:
raise err
# data = json.load(f)
# chunks of import file path, to get base directory
filepath_parts = filepath.split(os.path.sep)
# check if groups in .json, not a spec, used by this exporter as additional data to group models together
if "groups" in data:
groups = data["groups"]
else:
groups = {}
# objects created
root_objects = [] # root level objects
all_objects = [] # all objects added
# vintage story coordinate system origin
if translate_origin is not None:
translate_origin = Vector(translate_origin)
# set scene collection as active
scene_collection = bpy.context.view_layer.layer_collection
bpy.context.view_layer.active_layer_collection = scene_collection
# =============================================
# import textures, create map of material name => material
# =============================================
"""Assume two types of texture formats:
"textures:" {
"down": "#bottom", # texture alias to another texture
"bottom": "block/stone", # actual texture image
}
Loading textures is two pass:
1. load all actual texture images
2. map aliases to loaded texture images
"""
tex_width = data["textureWidth"] if "textureWidth" in data else 16.0
tex_height = data["textureHeight"] if "textureHeight" in data else 16.0
textures = {}
if import_textures and "textures" in data:
# get textures base path for models
tex_base_path = get_base_path(filepath_parts, curr_branch="shapes", new_branch="textures")
# load texture images
for tex_name, tex_path in data["textures"].items():
# skip aliases
if tex_path[0] == "#":
continue
filepath_tex = os.path.join(tex_base_path, *tex_path.split("/")) + ".png"
textures[tex_name] = create_textured_principled_bsdf(tex_name, filepath_tex)
# update stats
if stats:
stats.textures += 1
# map texture aliases
for tex_name, tex_path in data["textures"].items():
if tex_path[0] == "#":
tex_path = tex_path[1:]
if tex_path in textures:
textures[tex_name] = textures[tex_path]
# =============================================
# recursively import geometry, uvs
# =============================================
root_origin = np.array([0.0, 0.0, 0.0])
root_elements = data["elements"]
for e in root_elements:
obj = load_element(
e,
None,
root_origin,
root_origin,
all_objects,
textures,
tex_width=tex_width,
tex_height=tex_height,
import_uvs=True,
stats=stats,
)
root_objects.append(obj)
# =============================================
# model post-processing
# =============================================
if recenter_to_origin:
# model bounding box vector
model_v_min = np.array([inf, inf, inf])
model_v_max = np.array([-inf, -inf, -inf])
# re-used buffer
v_world = np.zeros((3, 8))
# get bounding box
for obj in root_objects:
mesh = obj.data
mat_world = obj.matrix_world
for i, v in enumerate(mesh.vertices):
v_world[0:3,i] = mat_world @ v.co
model_v_min = np.amin( | np.append(v_world, model_v_min[...,np.newaxis], axis=1) | numpy.append |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
from mpl_toolkits.mplot3d import Axes3D
# AND gate weights and bias
AND_W = np.array([0.5, 0.5])
AND_B = -0.7
# NAND gate weights and bias
NAND_W = np.array([-0.5, -0.5])
NAND_B = 0.7
# OR gate weights and bias
OR_W = np.array([0.5, 0.5])
OR_B = -0.3
# Perceptron step activation: 1 where the pre-activation is positive, else 0
def Perceptron(x):
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24,
    # raising AttributeError; the builtin `int` is the equivalent spelling.
    return np.array(x > 0, dtype=int)
# sigmoid: logistic activation mapping any real number into (0, 1)
def Sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# ReLU activation
def Relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    return np.maximum(x, 0)
# Softmax: turns raw scores into a probability distribution; the usual
# choice for classification outputs.
def Softmax(a):
    """Numerically stable softmax: exp(a - max(a)) normalized to sum 1."""
    shifted = a - np.max(a)  # subtracting the max avoids overflow in exp
    exp_scores = np.exp(shifted)
    return exp_scores / np.sum(exp_scores)
# XOR gate built from NAND and OR perceptrons feeding an AND output stage
def XOR(x1, x2):
    # hidden layer: thresholded NAND and OR of the two inputs
    local_x1 = Perceptron(np.sum(np.array([x1, x2])*NAND_W)+NAND_B)
    local_x2 = Perceptron(np.sum(np.array([x1, x2])*OR_W)+OR_B)
    # output stage: AND of the hidden units squashed through a sigmoid.
    # NOTE(review): the final stage uses Sigmoid rather than Perceptron, so
    # the result is a value in (0, 1) rather than a hard 0/1 -- presumably
    # intentional for the plotting demos; confirm if a binary output is wanted.
    return Sigmoid(np.sum(np.array([local_x1, local_x2])*AND_W)+AND_B)
def test1():
    """Print the XOR network's output for all four binary input pairs."""
    for a, b in ((0, 0), (1, 0), (0, 1), (1, 1)):
        print(XOR(a, b))
def test2():
    """Plot the step (perceptron) and sigmoid activations next to y = x."""
    x = np.arange(-10, 10, 0.1)
    y1 = Perceptron(x)
    y2 = Sigmoid(x)
    plt.plot(x, x,linestyle='--',label="x")
    plt.plot(x, y1,label="Perceptron")
    plt.plot(x, y2,label="Sigmoid")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.ylim(-0.01, 1.01) # fix the y-axis display range
    plt.legend()
    plt.show()
# Chapter 3
def identity_function(x):
    """Identity activation: returns its input unchanged (used at the output layer)."""
    return x
def test3():
    """Hand-rolled forward pass through a 2-3-2-2 network (Chapter 3 demo)."""
    # input layer
    X = np.array([1.0, 0.5])# input vector
    W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])# layer-1 weights
    B1 = np.array([0.1, 0.2, 0.3])# layer-1 bias
    A1 = np.dot(X, W1) + B1
    Z1 = Sigmoid(A1)
    #print(A1) # [0.3, 0.7, 1.1]
    #print(Z1) # [0.57444252, 0.66818777, 0.75026011]
    # hidden layer
    W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
    B2 = np.array([0.1, 0.2])
    A2 = np.dot(Z1, W2) + B2
    Z2 = Sigmoid(A2)
    # output layer (identity activation)
    W3 = np.array([[0.1, 0.3], [0.2, 0.4]])
    B3 = np.array([0.1, 0.2])
    A3 = np.dot(Z2, W3) + B3
    Y = identity_function(A3) # or: Y = A3
    print(Y)
# test3 reorganized into reusable functions:
# fixed network weights and biases
def init_network():
    """Build the parameters of the fixed 2-3-2-2 demo network."""
    return {
        'W1': np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]),
        'b1': np.array([0.1, 0.2, 0.3]),
        'W2': np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]),
        'b2': np.array([0.1, 0.2]),
        'W3': np.array([[0.1, 0.3], [0.2, 0.4]]),
        'b3': np.array([0.1, 0.2]),
    }
# forward pass through the 3-layer network
def forward(network, x):
    """Propagate input x through the network: two sigmoid hidden layers
    followed by an identity output layer."""
    h1 = Sigmoid(np.dot(x, network['W1']) + network['b1'])
    h2 = Sigmoid(np.dot(h1, network['W2']) + network['b2'])
    out = np.dot(h2, network['W3']) + network['b3']
    return identity_function(out)
# test data
def test4():
    """Run the demo forward pass on a fixed input and print the result."""
    network = init_network()
    x = np.array([1.0, 0.5])
    y = forward(network, x)
    print(y) # [ 0.31682708 0.69627909]
def test5():
a= | np.array([0.3,2.9,4.0]) | numpy.array |
import tensorflow as tf
import numpy as np
# Print arrays in full. `threshold=np.nan` relied on NaN comparisons to
# disable truncation and is rejected by modern NumPy (ValueError); an
# infinite threshold is the supported way to say "never truncate".
np.set_printoptions(threshold=np.inf)
import matplotlib.pyplot as plt
import data_handler as dh
import os
import sys
class MDN(object):
'''
Contains useful methods for training, testing, and validating a mixture
density network.
'''
    def __init__(self, session, input_size, num_gaussians=3, num_lstm_cells=300,
                 save=False):
        '''
        Sets up the computation graph for the MDN.
        Bishop, et. al, use a mixture of univariate gaussians, which allows them
        to avoid futzing around with off-diagonal covariance matrix terms.
        Univariate gaussians have proven to be insufficient for prediction, so this
        model uses full covariance matrices for the mixture components.
        '''
        dtype = tf.float32
        self.session = session
        self.weights = []        # all weight Variables, appended by _get_weights
        self.biases = []         # all bias Variables, appended by _get_weights
        self.layers = []         # layer objects/tensors in construction order
        self.init_states = []    # per-layer LSTM initial state placeholders
        self.num_lstm_layers = 3
        self.num_gaussians = num_gaussians
        self.input_size = input_size
        self.l2_penalty = 0.00001
        # per-component parameter counts: (input_size - 1) means and variance
        # terms each, plus one correlation term per gaussian
        num_means = num_gaussians*(input_size - 1)
        num_variances = num_gaussians*(input_size - 1)
        num_correlations = num_gaussians*(1)
        # final +1 is the end-of-stroke logit
        output_size = num_gaussians + num_means + num_variances \
                      + num_correlations + 1
        print("output size:", output_size)
        print("output size per gaussian:", (output_size - 1)/num_gaussians)
        with tf.variable_scope("mdn"):
            self.input_data = tf.placeholder(dtype=dtype,
                shape=[None, None, input_size], name="batch_input")
            self.output_data = tf.placeholder(dtype=dtype,
                shape=[None, None, input_size], name="batch_targets")
            batch_size = tf.shape(self.input_data)[0]
            seq_length = tf.shape(self.input_data)[1]
            print("batch_size info: ", batch_size)
            # For each layer of lstm's, create a set of placeholders to contain
            # values passed to each lstm cell's initial recurrent state.
            for i in range(self.num_lstm_layers):
                ph_c = tf.placeholder(dtype=dtype, shape=[None, num_lstm_cells])
                ph_h = tf.placeholder(dtype=dtype, shape=[None, num_lstm_cells])
                self.init_states.append(
                    tf.nn.rnn_cell.LSTMStateTuple(ph_c, ph_h))
            self.init_states = tuple(self.init_states)
            lstm_layers = []
            for i in range(self.num_lstm_layers):
                lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_lstm_cells)
                if i < self.num_lstm_layers - 1:
                    self.layers.append(lstm_cell)
                    lstm_layers.append(self.layers[-1])
                else:
                    # last LSTM layer gets a residual (skip) connection
                    self.layers.append(tf.contrib.rnn.ResidualWrapper(lstm_cell))
                    lstm_layers.append(self.layers[-1])
            # Get a list of LSTM cells in the current set of layers, then pass those
            # to the MultiRNNCell method.
            self.multi_lstm_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_layers)
            # LSTM layers
            outputs, self.last_lstm_state = \
                tf.nn.dynamic_rnn(self.multi_lstm_cell, self.input_data, dtype=dtype,
                                  initial_state=self.init_states)
            self.zero_states = self.multi_lstm_cell.zero_state(batch_size, dtype=dtype)
            # flatten (batch, time) so the output layer is a single matmul
            outputs_flat = tf.reshape(outputs, [-1, num_lstm_cells], name="dynamic_rnn_reshape")
            self.layers.append(outputs_flat)
            # Output layer
            shape = [num_lstm_cells, output_size]
            self.layers.append(self._linear_op(self.layers[-1], shape))
            # Get the mixture components
            splits = [num_means, num_variances, num_correlations, num_gaussians, 1]
            pieces = tf.split(self.layers[-1], splits, axis=1)
            self.means = pieces[0]
            # softplus keeps stdevs positive
            self.stdevs = tf.nn.softplus(pieces[1])
            # tanh clamped just inside (-1, 1) keeps covariances valid
            self.correls = 0.9999*tf.nn.tanh(pieces[2])
            self.mix_weights = tf.nn.softmax(pieces[3])
            self.stroke = tf.nn.sigmoid(pieces[4])
            # Reshape the means, stdevs, correlations, and mixture weights for
            # friendly returns
            means_shape = [batch_size, seq_length, num_gaussians, 2]
            stdevs_shape = [batch_size, seq_length, num_gaussians, 2]
            mixes_shape = [batch_size, seq_length, num_gaussians, 1]
            # NOTE(review): there is only ONE correlation term per gaussian
            # (num_correlations = num_gaussians*1) but this reshape assumes a
            # trailing dim of 2 -- verify correls_ is actually consumable.
            correls_shape = [batch_size, seq_length, num_gaussians, 2]
            self.means_ = tf.reshape(self.means, means_shape)
            self.stdevs_ = tf.reshape(self.stdevs, stdevs_shape)
            self.mix_weights_ = tf.reshape(self.mix_weights, mixes_shape)
            self.correls_ = tf.reshape(self.correls, correls_shape)
            outputs_flat = tf.reshape(self.output_data, [-1, input_size])
            gauss_values, stroke = tf.split(outputs_flat, [input_size-1, 1], axis=1)
            # Grab these for sampling from the network.
            self.gauss_params = \
                self._get_gaussian_params(self.means, self.stdevs, self.correls,
                                          num_gaussians)
            # These are for training or evaluating the network.
            self.gauss_evals = self._eval_gaussians(gauss_values, self.means,
                                                    self.stdevs, self.correls,
                                                    num_gaussians)
            print(self.gauss_evals.shape, self.mix_weights.shape)
            print(pieces[3].shape)
            self.mixture = tf.reduce_sum(self.gauss_evals*self.mix_weights, axis=-1)
            stroke_loss = \
                tf.nn.sigmoid_cross_entropy_with_logits(labels=stroke, logits=pieces[4])
            print("unreduced stroke loss shape:", stroke_loss.shape)
            #a = self.gauss_evals*self.mix_weights
            #print("unreduced mixture shape:", a.shape)
            self.stroke_loss = tf.reduce_sum(stroke_loss, axis=-1)
            print(self.stroke_loss.shape)
            # negative log-likelihood of the mixture plus stroke cross-entropy,
            # plus an L2 penalty over every trainable variable
            self.loss = tf.reduce_mean(-1*tf.log(self.mixture + 1e-8) + self.stroke_loss, name="loss")
            self.loss += self.l2_penalty*tf.reduce_sum([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            # Need to clip gradients (?)
            optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0004)
            self.train_op = optimizer.minimize(self.loss)
            if save:
                self.saver = tf.train.Saver()
def _get_weights(self, shape, name="requested_weight"):
'''
Returns a location of a Rank(2) tensor of weights and a Rank(1) tensor of
biases within the self.weights and self.biases lists.
'''
weights = tf.Variable(tf.random_normal(shape, stddev=0.1), name=name)
biases = tf.Variable(tf.random_normal([shape[-1]], stddev=0.1), name=name)
self.weights.append(weights)
self.biases.append(biases)
return (len(self.weights) - 1, len(self.biases) - 1)
def _linear_op(self, input_tensor, shape):
'''
Perform simple matmul and bias offset between the input_tensor and a tensor
of weights that will be generated in this method.
So you specify an input tensor and the shape of the weight matrix, and this
method does the following:
create weight: W with shape = "shape"
create bias: b with shape = "shape[1]"
matmul(input_tensor, W) + b
'''
(W_loc, b_loc) = self._get_weights(shape, "linear_op_weights")
return tf.matmul(input_tensor, self.weights[W_loc]) + self.biases[b_loc]
def _get_gaussian_params(self, means, stdevs, correls, num_gaussians):
'''
Returns the parameters of the densities in the GMM.
'''
with tf.variable_scope("gmm_breakdown"):
comp_means = tf.split(means, num_gaussians, axis=1)
comp_stdevs = tf.split(stdevs, num_gaussians, axis=1)
comp_correls = tf.split(correls, num_gaussians, axis=1)
return (comp_means, comp_stdevs, comp_correls)
    def _eval_gaussians(self, values, means, stdevs, correls, num_gaussians):
        '''
        Takes tensors of values, means, and stdevs, and returns tensors of
        gaussians parametrized by 'means', 'stdevs', and 'correls' evaluated at
        'values'. Here we assume that 'values' only contains components relevant
        to the GMM on the output.
        values -> [bs*sl, M]
        stdevs -> [bs*sl, num_gaussians*M]
        means -> [bs*sl, num_gaussians*M]
        correls -> [bs*sl, num_gaussians]
        Returns a [bs*sl, num_gaussians] tensor of per-component densities.
        '''
        print("gaussian component shapes:")
        print("\tvalues:", values.shape)
        print("\tstdevs:", stdevs.shape)
        print("\tmeans:", means.shape)
        print("\tcorrels:", correls.shape)
        with tf.variable_scope("gmm_evaluation"):
            comp_means = tf.split(means, num_gaussians, axis=1)
            comp_stdevs = tf.split(stdevs, num_gaussians, axis=1)
            comp_correls = tf.split(correls, num_gaussians, axis=1)
            gaussians = []
            for i in range(num_gaussians):
                # (1 - rho^2) denominator of the bivariate normal density
                correls_denom = tf.reduce_sum(1 - comp_correls[i]*comp_correls[i], axis=1)
                # normalization 1/(2*pi*sx*sy*sqrt(1-rho^2)); epsilon avoids div-by-zero
                factor = 1./(2*np.pi*tf.reduce_prod(comp_stdevs[i], axis=1)*tf.sqrt(correls_denom) + 1e-8)
                print("\tfactor", i, ":", factor.shape)
                #print(self.session.run([tf.shape(comp_means[i]), tf.shape(comp_stdevs[i])]))
                # standardized residuals (x - mu)/sigma per dimension
                norms = (values - comp_means[i])/(comp_stdevs[i] + 1e-8)
                exponents = -(1/(2*correls_denom + 1e-8))*(tf.reduce_sum(norms*norms, axis=1) - tf.reduce_prod(norms, axis=1)*2*tf.reduce_sum(comp_correls[i], axis=1))
                print("\texponents", i, ":", exponents.shape)
                #ind_gaussians.append(factors*tf.exp(exponents))
                gaussians.append(factor*tf.exp(exponents))
            # You have a gaussian for each set of components of the mixture model,
            # now you just have to reduce those components into the pieces of the GMM.
            #gaussians = [tf.reduce_prod(g, axis=-1) for g in ind_gaussians]
            stacked_gaussians = tf.stack(gaussians, axis=1)
            print("stacked gaussians shape:", stacked_gaussians.shape)
            return stacked_gaussians
def _get_mixture_sample(self, params, mix):
'''
Returns a single sample from the GMM defined by params and the mixture
weights.
Assumes that 'params' is a list of GMM parameters.
Assumes that 'mix' is a simple numpy array, where the mixture's shape is
one-dimensional, and its size is the number of gaussians in the mixture.
'''
# params[0] --> means
# params[1] --> variance terms
# params[2] --> correlation terms
# Variance terms aren't in matrix form, but we know what a 2D gaussian
# with correlated variables looks like, so we use this form to construct
# a 2D gaussian by filling in the covariance matrix and means.
sample = np.zeros_like(params[0][0])
for i in range(self.num_gaussians):
mean = params[0][i]
#print(" mixture_sample mean shape:", mean.shape)
cov = np.zeros((self.input_size - 1, self.input_size - 1))
for j in range(self.input_size - 1):
#print(" mixture_sample cov shape:", params[1][i].shape)
cov[j,j] = params[1][i][j]
#cov[j,1-j] = params[2][i][0]
cov[j,1-j] = params[2][i] # Zero probably removed by squeeze operation
#print("covariance: ", cov)
sample += mix[i]*np.random.multivariate_normal(mean, cov)
return sample[np.newaxis, np.newaxis, :]
def train_batch(self, batch_in, batch_out):
'''
Trains the MDN on a single batch of input.
Returns the loss, parameters of each gaussian, and the weights associated
with each density in the Gaussian Mixture.
'''
(batch_size, sequence_length, input_size) = batch_in.shape
feeds = {
self.input_data: batch_in,
self.output_data: batch_out
}
zero_states = self.session.run(self.zero_states, feed_dict=feeds)
for i in range(self.num_lstm_layers):
feeds[self.init_states[i][0]] = zero_states[i][0]
feeds[self.init_states[i][1]] = zero_states[i][1]
fetches = [
self.train_op,
self.loss,
self.gauss_evals,
self.mixture,
self.means_,
self.stdevs_,
self.mix_weights_,
self.stroke,
self.gauss_params
]
_, loss, gauss_eval, mix_eval, means_, stdevs_, mix, stroke, params = self.session.run(fetches, feed_dict=feeds)
print("shape of means:", means_.shape)
print("shape of stdevs:", stdevs_.shape)
correls = params[2]
max_correl = 0
for i in range(self.num_gaussians):
max_correl = max(max_correl, np.amax(np.sum((correls[i]*correls[i]), axis=1)))
print("max_correl denom:", max_correl)
if max_correl > 1:
print("OUT OF BOUNDS VALUE FOR MAX_CORREL")
sys.exit(-1)
if loss == np.nan:
print("LOSS IS NAN")
sys.exit(-1)
return (loss, means_, stdevs_, mix, gauss_eval, mix_eval, stroke)
def validate_batch(self, batch_in, batch_out):
'''
Runs the network on the given input batch and calculates a loss using the
output batch. No training is performed.
'''
(batch_size, sequence_length, input_size) = batch_in.shape
feeds = {
self.input_data: batch_in,
self.output_data: batch_out
}
zero_states = self.session.run(self.zero_states, feed_dict=feeds)
for i in range(self.num_lstm_layers):
feeds[self.init_states[i][0]] = zero_states[i][0]
feeds[self.init_states[i][1]] = zero_states[i][1]
fetches = [
self.loss,
self.means_,
self.stdevs_,
self._mix_weights
]
loss, means_, stdevs_, mix = self.session.run(fetches, feed_dict=feeds)
return (loss, means_, stdevs_, mix)
def _run_once(self, input_, stroke_, initial_states):
'''
Takes a single input, (e.g. batch_size = 1, sequence_length = 1), passes it
to the MDN, grabs the mixture parameters and final recurrent state of the
MDN. Then it takes the mixture parameters and samples from them.
The MDN returns the sampled value, other outputs, and the final recurrent
state.
Assumes input_.shape = [1, 1, input_size - 1]
Assumes stroke_.shape = [1, 1, 1]
'''
#print('run_once input and stroke shapes:', input_.shape, stroke_.shape)
# Concatenate stroke and (dx, dy) input to get (1, 1, 3) input tensor.
feeds = {
self.input_data: | np.concatenate([input_, stroke_], axis=-1) | numpy.concatenate |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""feets.extractors.ext_signature Tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from feets import extractors
from matplotlib.testing.decorators import check_figures_equal
import numpy as np
import seaborn as sns
# =============================================================================
# Test cases
# =============================================================================
@check_figures_equal()
def test_plot_SignaturePhMag(fig_test, fig_ref):
    """Signature extractor's plot must equal a manually drawn heatmap.

    ``check_figures_equal`` renders both figures and compares the saved
    images, so the reference drawing calls below must mirror the
    extractor's internal plotting exactly.
    """
    # fig test: let the extractor draw its feature onto the test figure.
    ext = extractors.Signature()
    kwargs = ext.get_default_params()
    kwargs.update(
        feature="SignaturePhMag",
        value=[[1, 2, 3, 4]],
        ax=fig_test.subplots(),
        plot_kws={},
        time=[1, 2, 3, 4],
        magnitude=[1, 2, 3, 4],
        error=[1, 2, 3, 4],
        features={"PeriodLS": 1, "Amplitude": 10},
    )
    ext.plot(**kwargs)
    # expected: rebuild the same heatmap by hand on the reference figure.
    eax = fig_ref.subplots()
    eax.set_title(
        f"SignaturePhMag - {kwargs['phase_bins']}x{kwargs['mag_bins']}"
    )
    eax.set_xlabel("Phase")
    eax.set_ylabel("Magnitude")
    sns.heatmap(kwargs["value"], ax=eax, **kwargs["plot_kws"])
def test_multiple_peaks_period_ls():
    """Requesting extra Lomb-Scargle peaks must not change the first peak.

    Features derived from the dominant period (Amplitude, Signature) must
    therefore be identical whether 1 or 2 peaks are extracted.

    Bug fix: the final assertion line was garbled/truncated (a syntax
    error); restored the SignaturePhMag comparison.
    """
    random = np.random.RandomState(54)
    lc = {
        "time": np.arange(100) + random.uniform(size=100),
        "magnitude": random.normal(size=100),
        "error": None,
    }
    # one peak vs. two peaks
    ls_ext_1 = extractors.LombScargle()
    ls_ext_2 = extractors.LombScargle(peaks=2)
    amp_ext = extractors.Amplitude()
    sig_ext = extractors.Signature()
    rs0 = ls_ext_1.extract(features={}, **lc)
    rs0.update(amp_ext.extract(features=rs0, **lc))
    rs0.update(sig_ext.extract(features=rs0, **lc))
    rs1 = ls_ext_2.extract(features={}, **lc)
    rs1.update(amp_ext.extract(features=rs1, **lc))
    rs1.update(sig_ext.extract(features=rs1, **lc))
    assert np.all(rs0["PeriodLS"][0] == rs1["PeriodLS"][0])
    assert np.all(rs0["Amplitude"] == rs1["Amplitude"])
    assert np.all(rs0["SignaturePhMag"] == rs1["SignaturePhMag"])
from flask import Flask, render_template, request
import numpy as np
import re
import base64
from PIL import Image
from scipy.misc import imsave, imread, imresize
from util.train import conv
from util.prepare_data import normalize
import json
app = Flask(__name__)
model = conv(classes=9,input_shape=(28, 28, 1))
model.load("./model/doodle_classifier_1.0.tflearn")
@app.route("/", methods=["GET", "POST"])
def ready():
global model
if request.method == "GET":
return render_template("index.html")
if request.method == "POST":
data = request.form["payload"].split(",")[1]
img = base64.decodestring(data.encode('ascii'))
with open('temp.png', 'wb') as output:
output.write(img)
x = imread('temp.png', mode='L')
# resize input image to 28x28
x = imresize(x, (28, 28))
x = np.expand_dims(x, axis=0)
x = | np.reshape(x, (28, 28, 1)) | numpy.reshape |
"""
pyart.io._sigmet_noaa_hh
========================
Functions needed for reading Sigmet files from the airborne radar located on
NOAA's Hurricane Hunter aircraft.
.. autosummary::
:toctree: generated/
_decode_noaa_hh_hdr
_georeference_yprime
"""
import numpy as np
from ._sigmetfile import bin4_to_angle, bin2_to_angle
def _decode_noaa_hh_hdr(
        raw_extended_headers, filemetadata, azimuth, elevation,
        position_source='irs', heading_source='irs'):
    """
    Extract data from Sigmet extended headers produced by NOAA
    Hurricane Hunter airborne radars.
    Parameters
    ----------
    raw_extended_headers : ndarray
        Raw Sigmet extended headers.
    filemetadata : FileMetadata
        FileMetadata class from which metadata will be derived.
    azimuth : dict
        Dictionary of azimuth angles recorded in Sigmet file.
    elevation : dict
        Dictionary of elevation angles recorded in Sigmet file.
    position_source: {'irs', 'gps', 'aamps'}, optional
        Instrument from which to derive position parameters.
    heading_source: {'irs', 'aamps'}
        Instrument from which to derive heading parameters.
    Returns
    -------
    latitude : dict
        Dictionary containing latitude data and metadata.
    longitude : dict
        Dictionary containing longitude data and metadata.
    altitude : dict
        Dictionary containing altitude data and metadata.
    heading_params : dict
        Dictionary of dictionary containing aircraft heading data and
        metadata. Contains 'heading', 'roll', pitch', 'drift', 'rotation',
        'tilt' and 'georefs_applied' dictionaries.
    Raises
    ------
    ValueError
        If heading_source or position_source is not a documented choice.
    """
    # Reinterpret the first 68 bytes of each raw header as a packed record
    # array; NOAA_HH_EXTENDED_HEADER (the field layout) is defined
    # elsewhere in this module.
    # NOTE(review): ndarray.tostring() is deprecated in favor of
    # tobytes(); behavior is identical — consider updating.
    xhdr = np.rec.fromstring(raw_extended_headers[..., :68].tostring(),
                             dtype=list(NOAA_HH_EXTENDED_HEADER))
    # rotation and tilt from azimuth/elevation angles.
    # Rotation is measured from zenith, wrapped into [0, 360).
    rotation = filemetadata('rotation')
    tilt = filemetadata('tilt')
    rotation_data = 90. - elevation['data'].copy()
    rotation_data[rotation_data < 0] += 360.
    rotation['data'] = rotation_data
    # Tilt is the azimuth folded into (-180, 180].
    tilt_data = azimuth['data'].copy()
    tilt_data[tilt_data > 180] -= 360.
    tilt['data'] = tilt_data
    # airborne parameters, read from the selected navigation instrument
    heading = filemetadata('heading')
    roll = filemetadata('roll')
    pitch = filemetadata('pitch')
    drift = filemetadata('drift')
    if heading_source == 'irs':
        heading_data = bin2_to_angle(xhdr['irs_heading'])
        roll_data = bin2_to_angle(xhdr['irs_roll'])
        pitch_data = bin2_to_angle(xhdr['irs_pitch'])
        drift_data = bin2_to_angle(xhdr['irs_drift'])
    elif heading_source == 'aamps':
        heading_data = bin2_to_angle(xhdr['aamps_heading'])
        roll_data = bin2_to_angle(xhdr['aamps_roll'])
        pitch_data = bin2_to_angle(xhdr['aamps_pitch'])
        drift_data = bin2_to_angle(xhdr['aamps_drift'])
    else:
        raise ValueError('Unknown heading_source')
    heading['data'] = heading_data
    roll['data'] = roll_data
    pitch['data'] = pitch_data
    drift['data'] = drift_data
    # georeferenced azimuth and elevation (earth-relative beam pointing)
    az, elev = _georeference_yprime(
        roll_data, pitch_data, heading_data, drift_data, rotation_data,
        tilt_data)
    azimuth['data'] = az
    elevation['data'] = elev
    georefs_applied = filemetadata('georefs_applied')
    georefs_applied['data'] = np.ones(az.shape, dtype='int8')
    # positions: latitude, longitude, altitude
    latitude = filemetadata('latitude')
    longitude = filemetadata('longitude')
    altitude = filemetadata('altitude')
    if position_source == 'gps':
        lat_data = bin4_to_angle(xhdr['gps_lat'])
        lon_data = bin4_to_angle(xhdr['gps_long'])
        alt_data = xhdr['gps_alt'] / 100.  # stored in cm, convert to m
    elif position_source == 'aamps':
        lat_data = bin4_to_angle(xhdr['aamps_lat'])
        lon_data = bin4_to_angle(xhdr['aamps_long'])
        alt_data = xhdr['aamps_alt'] / 100.
    elif position_source == 'irs':
        lat_data = bin4_to_angle(xhdr['irs_lat'])
        lon_data = bin4_to_angle(xhdr['irs_long'])
        # NOTE(review): altitude intentionally read from the GPS field even
        # for 'irs' — presumably the IRS record carries no altitude; confirm.
        alt_data = xhdr['gps_alt'] / 100.
    else:
        raise ValueError('Invalid position_source')
    latitude['data'] = lat_data
    longitude['data'] = lon_data
    altitude['data'] = alt_data
    extended_header_params = {
        'heading': heading,
        'roll': roll,
        'pitch': pitch,
        'drift': drift,
        'rotation': rotation,
        'tilt': tilt,
        'georefs_applied': georefs_applied}
    return (latitude, longitude, altitude, extended_header_params)
def _georeference_yprime(roll, pitch, heading, drift, rotation, tilt):
"""
Compute georeferenced azimuth and elevation angles for a Y-prime radar.
This is the georeferencing needed for the tail doppler radar on the
NOAA P3 aircraft.
"""
# Adapted from Radx's SigmetRadxFile::_computeAzEl method found in
# SigmetRadxFile.cc
# Transforms defined in Wen-Chau Lee et al, JTech, 1994, 11, 572-578.
# Convert to radians and use variable names from Wen-Chau Lee paper
R = np.radians(roll) # roll
P = np.radians(pitch) # pitch
H = np.radians(heading) # heading
D = np.radians(drift) # drift
T = H + D # track
theta_a = np.radians(rotation)
tau_a = np.radians(tilt)
# Eq. (9)
x_t = (np.cos(theta_a + R) * np.sin(D) * np.cos(tau_a) * np.sin(P) +
np.cos(D) * np.sin(theta_a + R) * np.cos(tau_a) -
np.sin(D) * np.cos(P) * np.sin(tau_a))
y_t = (-np.cos(theta_a + R) * np.cos(D) * np.cos(tau_a) * np.sin(P) +
np.sin(D) * np.sin(theta_a + R) * np.cos(tau_a) +
np.cos(P) * np.cos(D) * np.sin(tau_a))
z_t = (np.cos(P) * np.cos(tau_a) * np.cos(theta_a + R) +
np.sin(P) * | np.sin(tau_a) | numpy.sin |
from sklearn import svm
import numpy as np
import sys
sys.path.append("..")
import data_tools as dt
import compartment_analysis as ca
from matplotlib import pyplot as plt
import os
import linear_algebra as la
import array_tools as at
from scipy import stats as st
#import plotting as plot
res_kb = 100
cell_type1 = sys.argv[1]
cell_type2 = sys.argv[2]
chroms = range(1, int(sys.argv[3]))
x_means = []
y_means = []
z_means = []
x_lengths = []
y_lengths = []
z_lengths = []
for chrom in chroms:
path1 = "hic_data/{}_{}_{}kb.bed".format(cell_type1, chrom, res_kb)
path2 = "hic_data/{}_{}_{}kb.bed".format(cell_type2, chrom, res_kb)
if os.path.isfile(path1) and os.path.isfile(path2):
os.system("python ../multimds.py --full -w 0 {} {}".format(path1, path2))
structure1 = dt.structure_from_file("hic_data/{}_{}_{}kb_structure.tsv".format(cell_type1, chrom, res_kb))
structure2 = dt.structure_from_file("hic_data/{}_{}_{}kb_structure.tsv".format(cell_type2, chrom, res_kb))
#plot.plot_structures_interactive((structure1, structure2))
#compartments
contacts1 = dt.matFromBed(path1, structure1)
contacts2 = dt.matFromBed(path2, structure2)
at.makeSymmetric(contacts1)
at.makeSymmetric(contacts2)
compartments1 = np.array(ca.get_compartments(contacts1))
compartments2 = np.array(ca.get_compartments(contacts2))
r, p = st.pearsonr(compartments1, compartments2)
if r < 0:
compartments2 = -compartments2
#SVR
coords1 = structure1.getCoords()
coords2 = structure2.getCoords()
coords = np.concatenate((coords1, coords2))
compartments = np.concatenate((compartments1, compartments2))
clf = svm.LinearSVR()
clf.fit(coords, compartments)
coef = clf.coef_
transformed_coords1 = np.array(la.change_coordinate_system(coef, coords1))
transformed_coords2 = np.array(la.change_coordinate_system(coef, coords2))
x_diffs = transformed_coords1[:,0] - transformed_coords2[:,0]
y_diffs = transformed_coords1[:,1] - transformed_coords2[:,1]
z_diffs = transformed_coords1[:,2] - transformed_coords2[:,2]
x_means.append(np.mean(np.abs(x_diffs)))
y_means.append(np.mean(np.abs(y_diffs)))
z_means.append(np.mean(np.abs(z_diffs)))
#axis lengths
centroid1 = np.mean(transformed_coords1, axis=0)
centroid2 = np.mean(transformed_coords2, axis=0)
x_length1 = np.mean([np.abs(coord1[0] - centroid1[0]) for coord1 in transformed_coords1])
y_length1 = np.mean([np.abs(coord1[1] - centroid1[1]) for coord1 in transformed_coords1])
z_length1 = np.mean([np.abs(coord1[2] - centroid1[2]) for coord1 in transformed_coords1])
x_length2 = np.mean([np.abs(coord2[0] - centroid2[0]) for coord2 in transformed_coords2])
y_length2 = np.mean([np.abs(coord2[1] - centroid2[1]) for coord2 in transformed_coords2])
z_length2 = np.mean([np.abs(coord2[2] - centroid2[2]) for coord2 in transformed_coords2])
x_lengths.append( | np.mean((x_length1, x_length2)) | numpy.mean |
import numpy as np
from phi import struct
from phi.math.math_util import is_static_shape
# creates normal distributed noise that can vary over the batch
def generateNoise(grid, var, mean=0, seed=0, dtype=np.float32):
    """Fill a grid-shaped tensor with Gaussian noise whose standard
    deviation ``var[b]`` varies per batch entry ``b``.

    A seeded RandomState makes the noise reproducible for a given seed.
    """
    full_shape = grid.data.shape
    rng = np.random.RandomState(seed)
    def _noise_array(_shape):
        # struct.map supplies a per-leaf shape, but the noise is always
        # generated for the full grid shape captured above.
        out = np.ones(full_shape)
        for batch_idx in range(full_shape[0]):
            sample = rng.normal(mean, var[batch_idx], size=full_shape[1:])
            out[batch_idx] = sample.astype(dtype)
        return out
    return struct.map(_noise_array, grid, leaf_condition=is_static_shape)
# creates randomized parameters for grid generation (v_parameter in paramType is varied over batch)
def generateParams(paramType, batch, dim, noiseStrength, vf1, vf2, vf3, vf4, vf5, vf7, vo1, vo2, vod, vnoise):
    """Draw randomized parameters for parameterized grid generation.

    Exactly one parameter family (selected by ``paramType``, e.g. "f1",
    "o2neg", "noise") is varied linearly across the batch using its
    corresponding ``v*`` step size; every other parameter is shared by all
    batch entries.  Consumes the *global* NumPy RNG — the draw order below
    must not be changed or reproducibility breaks.

    Returns a dict of (batch, dim, dim) arrays plus scalar ``nu`` and a
    per-batch ``noise`` vector.

    Raises ValueError for an unknown ``paramType``.
    """
    f1 = (-0.5 + np.random.rand(dim,dim) ) * 0.4 # range +- 0.4 , initial main dir
    f2 = (-0.5 + np.random.rand(dim,dim) ) * 0.4 # reduced range for frequencies , freq1
    f3 = (-0.5 + np.random.rand(dim,dim) ) * 0.3 # freq2
    f4 = (-0.5 + np.random.rand(dim,dim) ) * 0.3 # freq3
    f5 = (-0.5 + np.random.rand(dim,dim) ) * 0.2 # freq4
    f7 = (-0.5 + np.random.rand(dim,dim) ) * 100.0 # forcing shift, dir&speed
    o1 = 0. + ( np.random.rand(dim,dim)*100. ) # offsets, minor influence
    o2 = 0. + ( np.random.rand(dim,dim)*100. ) #
    nu = 0.0002 * ( 1+ np.random.rand()*500. ) # diffusion
    # switch between "static" (near 1) and "forced" (=0) cases smoothly
    sfCase = 0. + ( np.random.rand(dim,dim)*1. )
    # enlarge regular init for "static"
    staticF = (1.+2.*sfCase)
    f1 *= staticF
    f2 *= staticF
    f3 *= staticF
    f4 *= staticF
    f5 *= staticF
    f6 = (1.-sfCase) * 0.1 # note, factor is just forcing strength scaling for f6 factor
    fd = 1. / (1. + np.random.randint(6, size=(dim,dim)))
    od = 0. + np.random.rand(dim,dim)*100.
    # broadcast each (dim, dim) parameter across the batch dimension
    f1 = np.repeat(f1[np.newaxis,...], batch, axis=0)
    f2 = np.repeat(f2[np.newaxis,...], batch, axis=0)
    f3 = np.repeat(f3[np.newaxis,...], batch, axis=0)
    f4 = np.repeat(f4[np.newaxis,...], batch, axis=0)
    f5 = np.repeat(f5[np.newaxis,...], batch, axis=0)
    f6 = np.repeat(f6[np.newaxis,...], batch, axis=0)
    f7 = np.repeat(f7[np.newaxis,...], batch, axis=0)
    o1 = np.repeat(o1[np.newaxis,...], batch, axis=0)
    o2 = np.repeat(o2[np.newaxis,...], batch, axis=0)
    fd = np.repeat(fd[np.newaxis,...], batch, axis=0)
    od = np.repeat(od[np.newaxis,...], batch, axis=0)
    noise = noiseStrength * np.ones(batch) # normally constant noise level
    # amount[b, :, :] == b — the per-batch-entry ramp used to vary the
    # selected parameter linearly over the batch
    amount = np.repeat(np.arange(batch)[...,np.newaxis], dim, axis=1)
    amount = np.repeat(amount[...,np.newaxis], dim, axis=2)
    if paramType == "f1": f1 += vf1 * amount
    elif paramType == "f1neg": f1 -= vf1 * amount
    elif paramType == "f2": f2 += vf2 * amount
    elif paramType == "f2neg": f2 -= vf2 * amount
    elif paramType == "f3": f3 += vf3 * amount
    elif paramType == "f3neg": f3 -= vf3 * amount
    elif paramType == "f4": f4 += vf4 * amount
    elif paramType == "f4neg": f4 -= vf4 * amount
    elif paramType == "f5": f5 += vf5 * amount
    elif paramType == "f5neg": f5 -= vf5 * amount
    elif paramType == "f7": f7 += vf7 * amount
    elif paramType == "f7neg": f7 -= vf7 * amount
    elif paramType == "o1": o1 += vo1 * amount
    elif paramType == "o1neg": o1 -= vo1 * amount
    elif paramType == "o2": o2 += vo2 * amount
    elif paramType == "o2neg": o2 -= vo2 * amount
    elif paramType == "od": od += vod * amount
    elif paramType == "odneg": od -= vod * amount
    elif paramType == "noise": noise = vnoise * np.arange(batch) # increasing noise level
    else: raise ValueError("Unknown parameter type!")
    params = {
        "nu" : np.array(nu),
        "f1" : f1,
        "f2" : f2,
        "f3" : f3,
        "f4" : f4,
        "f5" : f5,
        "f6" : f6,
        "f7" : f7,
        "o1" : o1,
        "o2" : o2,
        "fd" : fd,
        "od" : od,
        "noise" : noise,
    }
    return params
# utilizes parameters generated with generateParams for grid initialization
def createParameterizedGrid(grid, gridType, age, dim, params):
p = params
size = np.array(grid.shape)
def array(shape):
mesh = np.meshgrid(np.arange(0, size[1]), np.arange(0, size[2]), np.arange(0, size[3]), indexing='ij')
mesh = np.transpose(np.asarray(mesh), (1,2,3,0))[...,np.newaxis]
mesh = | np.repeat(mesh, dim, axis=dim+1) | numpy.repeat |
import pysplishsplash
import gym
import pickle
import numpy as np
import torch
import argparse
import os,sys
import time
from scipy.ndimage import gaussian_filter,gaussian_filter1d
from scipy.stats import linregress
from scipy.spatial.transform import Rotation as R
import math
import matplotlib.pyplot as plt
from tqdm import tqdm,trange
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def convert_state_to_torch(state):
    """Turn an environment (features, particles) state into a pair of
    batch-size-1 float tensors on the module-level device."""
    raw_features, raw_particles = state[0], state[1]
    feature_batch = np.array([raw_features]).reshape(1, -1)
    particle_batch = raw_particles.reshape(1, *raw_particles.shape)
    features = torch.FloatTensor(feature_batch).to(device)
    particles = torch.FloatTensor(particle_batch).to(device)
    return features, particles
def evalstuff(state, action, td3):
    """Return the mean of the twin critics' Q estimates for taking
    *action* in *state* under the given TD3 agent.

    Bug fix: this previously ignored its ``td3`` argument and read the
    module-level ``policy`` global instead (all call sites pass ``policy``
    as ``td3``, so behavior is unchanged for them, but the function now
    works for any agent).  Also removed a dead tensor conversion whose
    result was never used.
    """
    q_val = td3.eval_q(state, action)
    # Average the two critic heads (TD3 keeps twin Q-networks).
    return (q_val[0][0] + q_val[1][0]) / 2
def train(state, td3):
    """Run one actor update on a batch built from 32 conversions of the
    same *state*."""
    repeats = 32
    converted = [convert_state_to_torch(state) for _ in range(repeats)]
    feature_batch = torch.cat([f for f, _ in converted], 0)
    particle_batch = torch.cat([p for _, p in converted], 0)
    td3._actor_learn(feature_batch, particle_batch)
def plot_q_compare(rew_lists, q_lists, discount, path, show=False):
    """Plot discounted empirical returns against the critic's Q estimates.

    ``rew_lists``/``q_lists`` hold one list per episode; at every time step
    the values are averaged over all episodes still running at that step,
    and the result is saved to *path*.
    """
    maxi = max(len(x) for x in rew_lists)
    print([len(x) for x in rew_lists])
    emp_rewards = [0 for _ in range(len(rew_lists))]
    emp_avg = []
    q_avg = []
    # Walk backwards through time so each episode's discounted return can
    # be accumulated incrementally.
    for i in range(maxi - 1, -1, -1):
        # Bug fix: these accumulators were previously re-initialized inside
        # the episode loop below, so each mean only ever saw the last
        # episode's value instead of averaging over all running episodes.
        emp_pot = []
        q_pot = []
        for j in range(len(rew_lists)):
            if len(rew_lists[j]) > i:
                emp_rewards[j] = emp_rewards[j] * discount + rew_lists[j][i]
                emp_pot.append(emp_rewards[j])
                q_pot.append(q_lists[j][i])
        emp_avg.append(np.mean(emp_pot))
        q_avg.append(np.mean(q_pot))
    emp_avg.reverse()
    q_avg.reverse()
    plt.plot(emp_avg, label="empirical Q value (discounted)")
    plt.plot(q_avg, label="TD3 computed Q value")
    plt.xlabel("time step")
    plt.ylabel("Q-value")
    plt.legend()
    plt.savefig(path)
    if show:
        plt.show()
    plt.cla()
def running_mean(x, N):
    """Simple moving average of *x* with window length *N* (cumsum trick)."""
    padded = np.insert(x, 0, 0)
    sums = np.cumsum(padded)
    window_sums = sums[N:] - sums[:-N]
    return window_sums / float(N)
def smooth_compare(x_ax1, x_ax2, vals1, vals2, xlabel, ylabel, legend_vals,
                   path, show=False, sigma=5):
    """Overlay two metric curves (raw plus Gaussian-smoothed) and save to
    *path*.

    ``legend_vals`` is a pair of labels, one per run; ``sigma`` is the
    width of the 1-d Gaussian smoothing kernel.
    """
    plt.figure(figsize=(10, 4))
    # Restrict the x range to the overlap of both runs.
    lims = (max(min(x_ax1), min(x_ax2)), min(max(x_ax1), max(x_ax2)))
    # Bug fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # is the documented replacement (np.float was an alias for it).
    v1 = gaussian_filter1d(np.array(vals1, dtype=float), sigma)
    v2 = gaussian_filter1d(np.array(vals2, dtype=float), sigma)
    # Smoothed curve strong, raw curve faint, one color per run.
    plt.plot(x_ax1[:len(v1)], v1, label=legend_vals[0], color="#00664d", linewidth=2)
    plt.plot(x_ax1[:len(vals1)], vals1, color="#00664d", alpha=0.4, linewidth=0.8)
    plt.plot(x_ax2[:len(v2)], v2, label=legend_vals[1], color="#e65c00", linewidth=2)
    plt.plot(x_ax2[:len(vals2)], vals2, color="#e65c00", alpha=0.4, linewidth=0.8)
    # Fixed y ranges for metrics compared across multiple figures.
    if ylabel == "Deviation from target fill level (ml)":
        plt.ylim(0, 50)
    if ylabel == "Spilled":
        plt.ylim(0, 5)
    plt.xticks(np.arange(lims[0], lims[1] + 1, 20))
    plt.xlim(lims[0], lims[1])
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.tight_layout()
    plt.savefig(path)
    if show:
        plt.show()
    plt.cla()
def plot_mean(x_ax, vals, xlabel, ylabel, legend_val, title, path,
              show=False, sigma=5):
    """Plot a raw metric curve plus its Gaussian-smoothed version and save
    to *path*.  ``sigma`` is the smoothing kernel width."""
    # Bug fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # is the documented replacement (np.float was an alias for it).
    rm = gaussian_filter1d(np.array(vals, dtype=float), sigma)
    plt.figure(figsize=(10, 4))
    plt.plot(x_ax, vals, label=legend_val)
    plt.plot(x_ax[:len(rm)], rm, label="gaussian smoothed", linewidth=4.0)
    plt.xticks(np.arange(min(x_ax), max(x_ax) + 1, 60))
    plt.xlim((min(x_ax), max(x_ax)))
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Fixed y range so this metric is comparable across figures.
    if title == "Absolute deviation from target fill level":
        plt.ylim(0, 50)
    plt.title(title)
    plt.legend()
    plt.tight_layout()
    plt.savefig(path)
    if show:
        plt.show()
    plt.cla()
def plot_action(all_action_list, path, show=False):
    """Plot the action averaged, per step, over every episode that is
    still running at that step, and save the figure to *path*."""
    longest = max(len(actions) for actions in all_action_list)
    avg_actions = [
        np.mean([actions[step] for actions in all_action_list
                 if len(actions) > step])
        for step in range(longest)
    ]
    plt.plot(avg_actions)
    plt.xlabel("Step")
    plt.ylabel("Avg rotation action")
    plt.title("avg rotation action per step")
    plt.savefig(path)
    if show:
        plt.show()
    plt.cla()
def plot_2d(tsps, spills, values, title, path, show=False, sigma=5):
    """Smoothed heatmap of *values* over the (time-step-punish,
    spill-punish) grid, saved to *path*."""
    # NOTE(review): assumes len(spills) == len(tsps) — the reshape uses
    # len(tsps) for both axes; confirm against eval_2d's grid.
    V = np.array(values).reshape((len(tsps), len(tsps)))
    print(V)
    # Bug fix: the smoothing width was hard-coded to 5, silently ignoring
    # the ``sigma`` parameter that callers (eval_2d) pass through.
    V = gaussian_filter(V, sigma=sigma)
    plt.imshow(V, interpolation="bilinear", cmap='RdBu', origin="lower")
    # Label every 4th grid cell with the rounded parameter value.
    plt.xticks(range(0, len(tsps), 4), [round(x, 2) for x in tsps[::4]])
    plt.yticks(range(0, len(spills), 4), [round(x, 2) for x in spills[::4]])
    plt.xlabel("time step punish")
    plt.ylabel("spill punish")
    plt.colorbar()
    plt.title(title)
    plt.savefig(path)
    if show:
        plt.show()
    plt.clf()
def eval_2d(policy, eval_env, seed, path, root_episodes=30, sigma=5, render=False):
    """Plot episode length over a 2-d (time-step-punish, spill-punish) grid.

    The evaluation rollout itself is currently disabled (kept below as a
    dead triple-quoted string); instead the cached results are loaded from
    ``<path>/all_arrays.npy`` and only the episode-length heatmap is drawn.
    """
    os.makedirs(path, exist_ok=True)
    policy.critic.eval()
    policy.actor.eval()
    eval_env.seed(seed + 100)
    eval_env.fixed_tsp = True
    eval_env.fixed_spill = True
    # Evaluation grid: root_episodes values along each parameter axis.
    spills = np.linspace(eval_env.spill_range[0], eval_env.spill_range[1], num=root_episodes)
    tsps = np.linspace(eval_env.time_step_punish_range[0], eval_env.time_step_punish_range[1], num=root_episodes)
    all_q_val_lists = []
    b_list = []
    all_reward_lists = []
    max_angle_list = []
    all_action_list = []
    spill_list = []
    glass_list = []
    print("Evaluating")
    # NOTE(review): the rollout below is commented out via a string literal;
    # re-enable it to regenerate all_arrays.npy.
    """for spill in tqdm(spills):
        for tsp in tsps:
            state, done = eval_env.reset(use_gui=render), False
            eval_env.spill_punish = spill
            eval_env.time_step_punish = tsp
            b = 0
            reward_list = []
            q_val_list = []
            action_list = []
            max_angle = 0
            while not done:
                b+=1
                action = policy.select_action(state)
                action_list.append(action)
                max_angle = max(state[0][0],max_angle)
                q_val_list.append(evalstuff(state,action,policy))
                state, reward, done, _ = eval_env.step(action)
                if render:
                    eval_env.render()
                reward_list.append(reward)
            all_q_val_lists.append(q_val_list)
            all_reward_lists.append(reward_list)
            all_action_list.append(action_list)
            spill_list.append(eval_env.particle_locations["spilled"])
            glass_list.append(eval_env.particle_locations["glass"])
            max_angle_list.append(max_angle)
            b_list.append(b)
    rew_list = [np.sum(x) for x in all_reward_lists]
    all_arrays = np.array([b_list,max_angle_list,spill_list,glass_list,[np.sum(x) for x in all_reward_lists]])
    np.save(os.path.join(path,"all_arrays.npy"),all_arrays)"""
    # Load the cached per-grid-cell results produced by an earlier run.
    b_list, max_angle_list, spill_list, glass_list, rew_list = np.load(os.path.join(path, "all_arrays.npy"))
    #plot_2d(tsps,spills,glass_list,"Fill state",os.path.join(path,"fill_state.svg"))
    #plot_2d(tsps,spills,spill_list,"Spilled",os.path.join(path,"spilled.svg"))
    #plot_2d(tsps,spills,max_angle_list,"Max angle",os.path.join(path,"max_angle.svg"))
    #plot_2d(tsps,spills,rew_list,"Total return",os.path.join(path,"total_return.svg"))
    plot_2d(tsps, spills, b_list, "episode_length", os.path.join(path, "episode_length.svg"), sigma=sigma)
    #plot_q_compare(all_reward_lists,all_q_val_lists,args.discount)
def compare2(load1, load2, name1, name2, min_rot1, min_rot2, basepath="plots/test", sigma=5, to_eval="targ"):
    """Compare two saved evaluation runs metric-by-metric.

    *load1*/*load2* are pickle files written by ``eval_1d``; *name1*/*name2*
    label them in the legends; *min_rot1*/*min_rot2* are the minimum bottle
    rotations (radians) used to de-normalize each run's angle data.  One
    ``smooth_compare`` figure per metric is written under *basepath*.
    """
    os.makedirs(basepath, exist_ok=True)
    name_map = {"tsp": "Time Step Punish",
                "spill": "Spill Punish",
                "targ": "Target fill level (ml)"}
    # Each pickle holds the 9-element list saved by eval_1d.
    with open(load1, "rb") as f:
        all_q_val_lists1, b_list1, all_reward_lists1, max_angle_list1, all_action_list1, ev_list1, spill_list1, glass_list1, avg_reward1 = pickle.load(f)
    with open(load2, "rb") as f:
        all_q_val_lists2, b_list2, all_reward_lists2, max_angle_list2, all_action_list2, ev_list2, spill_list2, glass_list2, avg_reward2 = pickle.load(f)
    print(np.sum(spill_list1), np.sum(spill_list2))
    reward_sum1 = [np.sum(x) for x in all_reward_lists1]
    reward_sum2 = [np.sum(x) for x in all_reward_lists2]
    # De-normalize angles: stored values are in [0, 1] over
    # [min_rot, pi] radians; convert to degrees for plotting.
    for i in range(len(max_angle_list1)):
        radians = max_angle_list1[i] * (math.pi - min_rot1) + min_rot1
        degrees = (radians * 180) / math.pi
        max_angle_list1[i] = degrees
    for i in range(len(max_angle_list2)):
        radians = max_angle_list2[i] * (math.pi - min_rot2) + min_rot2
        degrees = (radians * 180) / math.pi
        max_angle_list2[i] = degrees
    # Deviation plots only make sense when the swept variable is the
    # target fill level.
    if to_eval == "targ":
        dev_list1 = (np.array(glass_list1) - np.array(ev_list1))
        dev_list2 = (np.array(glass_list2) - np.array(ev_list2))
        smooth_compare(ev_list1, ev_list2, dev_list1, dev_list2, "Target fill level (ml)", "Deviation from target fill level (ml)",
                       [name1, name2], os.path.join(basepath, "deviation.svg"), sigma=sigma)
        smooth_compare(ev_list1, ev_list2, np.abs(dev_list1), np.abs(dev_list2), "Target fill level (ml)", "Deviation from target fill level (ml)",
                       [name1, name2], os.path.join(basepath, "abs_deviation.svg"), sigma=sigma)
    smooth_compare(ev_list1, ev_list2, b_list1, b_list2, name_map[to_eval], "Episode length (steps)",
                   [name1, name2], os.path.join(basepath, "epi_length.svg"), sigma=sigma)
    smooth_compare(ev_list1, ev_list2, max_angle_list1, max_angle_list2, name_map[to_eval], "Angle (Degrees)",
                   [name1, name2], os.path.join(basepath, "angle.svg"), sigma=sigma)
    smooth_compare(ev_list1, ev_list2, reward_sum1, reward_sum2, name_map[to_eval], "Return",
                   [name1, name2], os.path.join(basepath, "return.svg"), sigma=sigma)
    smooth_compare(ev_list1, ev_list2, spill_list1, spill_list2, name_map[to_eval], "Spilled",
                   [name1, name2], os.path.join(basepath, "spilled.svg"), sigma=sigma)
    smooth_compare(ev_list1, ev_list2, glass_list1, glass_list2, name_map[to_eval], "fill-level (ml)",
                   [name1, name2], os.path.join(basepath, "fill_state.svg"), sigma=sigma)
def rotation_volume_analysis(policy, eval_env, save_path, render=False):
    """Record per-step actions, bottle rotation angles (degrees) and glass
    fill volumes for several fixed target fill levels, and pickle the
    result to *save_path*.

    Bug fix: the bottle rotation previously read the module-level ``env``
    global instead of the ``eval_env`` parameter; it now uses the passed
    environment (callers pass the same object, so behavior is unchanged
    for them).
    """
    # Pin the reward parameters so only the target fill level varies.
    eval_env.fixed_tsp = True
    eval_env.fixed_spill = True
    eval_env.time_step_punish = 1
    eval_env.spill_punish = 25
    eval_env.fixed_target_fill = True
    targ_fills = [120, 150, 180, 210]
    action_lists = []
    rotation_lists = []
    volumes_lists = []
    for tf in targ_fills:
        eval_env.target_fill_state = tf
        state, done = eval_env.reset(use_gui=render), False
        action_list = []
        rotation_list = []
        volumes_list = []
        while not done:
            action = policy.select_action(state)
            if render:
                eval_env.render()
            volumes_list.append(eval_env.particle_locations["glass"])
            action_list.append(action[0])
            # z-Euler angle of the bottle's rotation matrix, in degrees.
            bottle_radians = R.from_matrix(eval_env.bottle.rotation).as_euler("zyx")[0]
            rotation_list.append((bottle_radians / math.pi) * 180)
            state, reward, done, _ = eval_env.step(action)
        action_lists.append(action_list)
        rotation_lists.append(rotation_list)
        volumes_lists.append(volumes_list)
    with open(save_path, "wb") as f:
        pickle.dump({"targ_fills": targ_fills,
                     "actions": action_lists,
                     "rotations": rotation_lists,
                     "volumes": volumes_lists}, f)
def eval_1d(policy, eval_env, seed, basepath="plots/test", eval_episodes=10, to_eval="tsp", N=5, render=False, load=None):
    """Sweep one reward parameter linearly over *eval_episodes* episodes and
    plot every recorded metric against it.

    ``to_eval`` selects the swept parameter: "tsp" (time-step punish),
    "spill" (spill punish) or "targ" (target fill level).  ``N`` is the
    smoothing sigma forwarded to the plot helpers.  When ``load`` is None
    the rollout is performed and results are pickled to
    ``<basepath>/data.pkl``; otherwise results are loaded from that file.
    Returns the average (undiscounted) return over all episodes.
    """
    os.makedirs(basepath,exist_ok=True)
    name_map = {"tsp":"Time Step Punish",
                "spill":"Spill Punish",
                "targ":"Target fill level (ml)"}
    if load is None:
        policy.critic.eval()
        policy.actor.eval()
        eval_env.seed(seed + 100)
        # Fix all reward parameters; the swept one is overwritten per episode.
        eval_env.fixed_tsp = True
        eval_env.fixed_spill = True
        eval_env.fixed_target_fill = True
        eval_env.target_fill_state = eval_env.max_in_glass
        eval_env.time_step_punish = 1
        eval_env.spill_punish = 25
        all_q_val_lists = []
        b_list = []
        all_reward_lists = []
        max_angle_list = []
        all_action_list = []
        ev_list = []
        spill_list = []
        glass_list = []
        print("Evaluating")
        for i in trange(eval_episodes):
            state, done = eval_env.reset(use_gui=render), False
            # Linearly interpolate the swept parameter over its range and
            # remember the value used for this episode in ev_list.
            if to_eval == "tsp":
                tsp = (eval_env.time_step_punish_range[0]+(eval_env.time_step_punish_range[1] -
                       eval_env.time_step_punish_range[0])/(eval_episodes-1) * i)
                ev_list.append(tsp)
                eval_env.time_step_punish = tsp
            elif to_eval == "spill":
                spill_punish = (eval_env.spill_range[0]+(eval_env.spill_range[1] -
                                eval_env.spill_range[0])/(eval_episodes-1) * i)
                eval_env.spill_punish = spill_punish
                ev_list.append(spill_punish)
            elif to_eval == "targ":
                target_fill = (eval_env.target_fill_range[0]+(eval_env.target_fill_range[1] -
                               eval_env.target_fill_range[0])/(eval_episodes-1) * i)
                eval_env.target_fill_state = target_fill
                print(target_fill)
                ev_list.append(target_fill)
            b = 0
            reward_list = []
            q_val_list = []
            action_list = []
            max_angle = 0
            while not done:
                b+=1
                action = policy.select_action(state)
                action_list.append(action)
                # The normalized rotation angle is the first feature; the
                # state may be a (features, particles) tuple or flat.
                angle = state[0][0] if type(state)==tuple else state[0]
                max_angle = max(angle,max_angle)
                q_val_list.append(evalstuff(state,action,policy))
                state, reward, done, _ = eval_env.step(action)
                if render:
                    eval_env.render()
                reward_list.append(reward)
            all_q_val_lists.append(q_val_list)
            all_reward_lists.append(reward_list)
            all_action_list.append(action_list)
            spill_list.append(eval_env.particle_locations["spilled"])
            glass_list.append(eval_env.particle_locations["glass"])
            max_angle_list.append(max_angle)
            b_list.append(b)
        avg_reward = np.mean([np.sum(x) for x in all_reward_lists])
        # Cache everything so later runs can re-plot via load=....
        with open(os.path.join(basepath,"data.pkl"),"wb") as f:
            to_save = [all_q_val_lists, b_list, all_reward_lists,
                       max_angle_list, all_action_list, ev_list,
                       spill_list, glass_list, avg_reward]
            pickle.dump(to_save,f)
    else:
        with open(os.path.join(basepath,"data.pkl"),"rb") as f:
            all_q_val_lists, b_list, all_reward_lists, max_angle_list, all_action_list, ev_list, spill_list, glass_list, avg_reward = pickle.load(f)
    # De-normalize angles from [0, 1] over [min_rotation, pi] to degrees.
    # NOTE(review): reads the module-level ``env`` global, not eval_env —
    # works only when called from __main__; consider using eval_env.
    for i in range(len(max_angle_list)):
        radians = max_angle_list[i]*(math.pi-env.min_rotation)+env.min_rotation
        degrees = (radians*180)/math.pi
        max_angle_list[i] = degrees
    ev_list = np.array(ev_list)
    print(linregress(ev_list[ev_list>=100],np.array(b_list)[ev_list>=100]))
    #print(linregress(ev_list[ev_list>=0],np.array(max_angle_list)[ev_list>=0]))
    # Deviation plots are only meaningful when sweeping the target fill.
    if to_eval=="targ":
        dev_list = (np.array(glass_list)-np.array(ev_list))
        #percent_list = (np.array(glass_list)-np.array(ev_list))/np.array(ev_list)
        #percent_list*=100
        #print(linregress(ev_list[ev_list>=340],np.abs(dev_list[ev_list>=340])))
        plot_mean(ev_list,dev_list,name_map[to_eval],"Deviation from target fill level (ml)","Deviation",
                  "Deviation from target fill level",os.path.join(basepath,f"{to_eval}_deviation.svg"),sigma=N)
        plot_mean(ev_list,np.abs(dev_list),name_map[to_eval],"Absolute deviation from target fill level (ml)","Deviation",
                  "Absolute deviation from target fill level",os.path.join(basepath,f"{to_eval}_abs_deviation.svg"),sigma=N)
    plot_mean(ev_list,b_list,name_map[to_eval],"Episode length","Episode length",
              "Episode lengths",os.path.join(basepath,f"{to_eval}_episode_length.svg"),sigma=N)
    plot_mean(ev_list,max_angle_list,name_map[to_eval],"Degrees","Degrees",
              f"Maximum angle of inclination",os.path.join(basepath,f"{to_eval}_angle.svg"),sigma=N)
    reward_sum = [np.sum(x) for x in all_reward_lists]
    plot_mean(ev_list,reward_sum,name_map[to_eval],"Return","total return","total return",
              os.path.join(basepath,f"{to_eval}_return.svg"),sigma=N)
    plot_action(all_action_list,os.path.join(basepath,"action.svg"))
    plot_mean(ev_list,spill_list,name_map[to_eval],"Spilled","num particles spilled",
              "Particles Spilled",os.path.join(basepath,f"{to_eval}_spilled.svg"),sigma=N)
    plot_mean(ev_list,glass_list,name_map[to_eval],"particles in glass","num particles in glass",
              "Final fill state",os.path.join(basepath,f"{to_eval}_fill.svg"),sigma=N)
    # NOTE(review): uses the module-level ``args.discount`` global.
    plot_q_compare(all_reward_lists,all_q_val_lists,args.discount,os.path.join(basepath,"q_compare.svg"))
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print(f"Avg episode length {np.mean(b_list)}")
    print("---------------------------------------")
    # Restore the environment's default behavior for subsequent training.
    eval_env.fixed_tsp = False
    eval_env.reset(use_gui=False)
    return avg_reward
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--policy", default="TD3_particles") # Policy name (TD3, DDPG or OurDDPG)
parser.add_argument("--env", default="water_pouring:Pouring-mdp-v0") # OpenAI gym environment name
parser.add_argument("--seed", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds
parser.add_argument("--start_timesteps", default=5e4, type=int) # Time steps initial random policy is used
parser.add_argument("--eval_freq", default=1e2, type=int) # How often (time steps) we evaluate
parser.add_argument("--max_timesteps", default=1e6, type=int) # Max time steps to run environment
parser.add_argument("--expl_noise", default=0.1, type=float) # Std of Gaussian exploration noise
parser.add_argument("--batch_size", default=256, type=int) # Batch size for both actor and critic
parser.add_argument("--discount", default=0.99, type=float) # Discount factor
parser.add_argument("--policy_uncertainty",default=0.3, type=float) # Std of env policy uncertainty
parser.add_argument("--tau", default=0.005, type=float) # Target network update rate
parser.add_argument("--policy_noise", default=0.2, type=float) # Noise added to target policy during critic update
parser.add_argument("--noise_clip", default=0.5, type=float) # Range to clip target policy noise
parser.add_argument("--policy_freq", default=2, type=int) # Frequency of delayed policy updates
#parser.add_argument("--time_step_punish", default=0.1, type=float)
parser.add_argument("--save_model", action="store_true") # Save model and optimizer parameters
parser.add_argument("--load_model", default="") # Model load file name, "" doesn't load, "default" uses file_name
parser.add_argument("--norm",type=str, default="layer")
parser.add_argument("--render", action="store_true")
parser.add_argument("--path",type=str, default="plots/test")
parser.add_argument("--to_eval",type=str, default="tsp")
parser.add_argument("--eval_episodes",type=int,default=100)
parser.add_argument("--running_num",type=int, default=5)
parser.add_argument("--load",type=str, default="")
parser.add_argument("--load2",type=str, default="")
parser.add_argument("--name1",type=str, default="default1")
parser.add_argument("--name2",type=str, default="default2")
parser.add_argument("--min_rot1",type=float, default=1.22)
parser.add_argument("--min_rot2",type=float, default=1.22)
parser.add_argument("--human_compare",action="store_true")
parser.add_argument("--jerk_punish",type=float, default=0)
parser.add_argument("--rot_vol_analysis",action="store_true")
args = parser.parse_args()
if args.load2!="":
compare2(args.load,args.load2,args.name1,args.name2,args.min_rot1,args.min_rot2,basepath=args.path,sigma=args.running_num,to_eval=args.to_eval)
exit()
env_kwargs = {
"policy_uncertainty":args.policy_uncertainty,
"jerk_punish":args.jerk_punish
}
if args.human_compare:
env_kwargs["scene_base"] = "scenes/smaller_scene.json"
env = gym.make(args.env,**env_kwargs)
if args.human_compare:
env.max_in_glass = 215
env.target_fill_range = [114,209]
print(env.observation_space,env.action_space)
print("made Env")
env.seed(args.seed)
torch.manual_seed(args.seed)
| np.random.seed(args.seed) | numpy.random.seed |
import numpy as np
import matplotlib.pyplot as plt
from qibo.models import Circuit
from qibo import gates
import aux_functions as aux
def rw_circuit(qubits, parameters, X=True):
    """Amplitude-distributor circuit over the unary basis.

    Starting from the central qubit, fSim gates spread amplitude outwards
    so the unary register encodes a target distribution.

    Args:
        qubits (int): number of qubits in the unary register.
        parameters (list): fSim angles produced by ``rw_parameters``.
        X (bool): whether the seeding X gate on the central qubit is applied.

    Yields:
        qibo gates implementing the amplitude distributor.
    """
    if qubits % 2 == 0:
        upper = qubits // 2
        lower = upper - 1
        if X:
            yield gates.X(upper)
        # Couple the two central qubits first, then walk outwards in both
        # directions simultaneously.
        yield gates.fSim(upper, lower, parameters[lower] / 2, 0)
        for step in range(lower):
            yield gates.fSim(lower - step, lower - step - 1, parameters[lower - step - 1] / 2, 0)
            yield gates.fSim(upper + step, upper + step + 1, parameters[upper + step] / 2, 0)
    else:
        centre = (qubits - 1) // 2
        if X:
            yield gates.X(centre)
        # Walk outwards from the single central qubit.
        for step in range(centre):
            yield gates.fSim(centre - step, centre - step - 1, parameters[centre - step - 1] / 2, 0)
            yield gates.fSim(centre + step, centre + step + 1, parameters[centre + step] / 2, 0)
def rw_circuit_inv(qubits, parameters, X=True):
    """Inverse of the unary amplitude-distributor circuit.

    Yields the adjoint sequence of :func:`rw_circuit` (reversed gate order,
    negated fSim angles); used inside the amplitude-estimation operator.

    Args:
        qubits (int): number of qubits in the unary register.
        parameters (list): fSim angles of the forward distributor.
        X (bool): whether the seeding X gate is undone at the end.

    Yields:
        qibo gates implementing the inverse amplitude distributor.
    """
    if qubits % 2 == 0:
        upper = qubits // 2
        lower = upper - 1
        # Undo the outward walk from the edges back to the centre.
        for step in range(lower - 1, -1, -1):
            yield gates.fSim(lower - step, lower - step - 1, -parameters[lower - step - 1] / 2, 0)
            yield gates.fSim(upper + step, upper + step + 1, -parameters[upper + step] / 2, 0)
        yield gates.fSim(upper, lower, -parameters[lower] / 2, 0)
        if X:
            yield gates.X(upper)
    else:
        centre = (qubits - 1) // 2
        for step in range(centre - 1, -1, -1):
            yield gates.fSim(centre + step, centre + step + 1, -parameters[centre + step] / 2, 0)
            yield gates.fSim(centre - step, centre - step - 1, -parameters[centre - step - 1] / 2, 0)
        if X:
            yield gates.X(centre)
def create_qc(qubits):
    """Allocate the registers and circuit for the unary option-pricing algorithm.

    Args:
        qubits (int): number of qubits in the unary price register.

    Returns:
        q (list): indices of the unary-register qubits.
        ancilla (int): index of the payoff ancilla qubit (last wire).
        circuit (Circuit): circuit with ``qubits + 1`` wires.
    """
    q = list(range(qubits))
    ancilla = qubits
    return q, ancilla, Circuit(qubits + 1)
def rw_parameters(qubits, pdf):
"""Parameters that encode a target probability distribution into the unary basis
Args:
qubits (int): number of qubits used for the unary basis.
pdf (list): known probability distribution function that wants to be reproduced.
Returns:
paramters (list): values to be introduces into the fSim gates for amplitude distribution.
"""
if qubits%2==0:
mid = qubits // 2
else:
mid = (qubits-1)//2 #Important to keep track of the centre
last = 1
parameters = []
for i in range(mid-1):
angle = 2 * np.arctan(np.sqrt(pdf[i]/(pdf[i+1] * last)))
parameters.append(angle)
last = (np.cos(angle/2))**2 #The last solution is needed to solve the next one
angle = 2 * np.arcsin(np.sqrt(pdf[mid-1]/last))
parameters.append(angle)
last = (np.cos(angle/2))**2
for i in range(mid, qubits-1):
angle = 2 * np.arccos(np.sqrt(pdf[i]/last))
parameters.append(angle)
last *= (np.sin(angle/2))**2
return parameters
def measure_probability(q):
    """Measurement gates for the unary register only.

    Used to validate the amplitude distributor; the ancilla qubit is
    deliberately left unmeasured.

    Args:
        q (list): indices of the unary-register qubits.

    Yields:
        the measurement gate over all unary qubits (register name 'prob').
    """
    yield gates.M(*q, register_name='prob') #No measure on the ancilla qubit is necessary
def extract_probability(qubits, counts, samples):
    """Normalized probabilities of the unary (one-hot) outcomes.

    Args:
        qubits (int): number of qubits in the unary register.
        counts (dict): measurement histogram keyed by bitstring.
        samples (int): total number of shots, used for normalization.

    Returns:
        prob (list): probability of each unary state, ordered from the
            bitstring with the leading '1' down to the trailing one.
    """
    # Unary state i is the one-hot bitstring for 2**i, zero-padded to
    # `qubits` characters.
    outcomes = ['0' * (qubits - 1 - i) + '1' + '0' * i for i in reversed(range(qubits))]
    return [counts.get(key, 0) / samples for key in outcomes]
def get_pdf(qubits, S0, sig, r, T):
    """Discretized log-normal price distribution for the unary register.

    Args:
        qubits (int): number of unary basis states (price bins).
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.

    Returns:
        values (np.array): price associated with each unary basis state.
        pdf (np.array): log-normal density evaluated at those prices.
    """
    mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
    mean = np.exp(mu + 0.5 * T * sig ** 2)
    variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
    std = np.sqrt(variance)
    # Price grid over [mean - 3*std, mean + 3*std], clipped below at 0.
    values = np.linspace(max(mean - 3 * std, 0), mean + 3 * std, qubits)
    pdf = aux.log_normal(values, mu, sig * np.sqrt(T))
    return values, pdf
def load_quantum_sim(qu, S0, sig, r, T):
    """Build the circuit that loads the target distribution in the unary basis.

    Args:
        qu (int): number of qubits in the unary register.
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.

    Returns:
        circuit (Circuit): circuit encoding the target distribution,
            with measurements on the unary register.
        (values, pdf): price grid and target probabilities (np.array each).
    """
    values, pdf = get_pdf(qu, S0, sig, r, T)
    q, ancilla, circuit = create_qc(qu)
    # Angles that reproduce the target log-normal in the unary basis.
    angles = rw_parameters(qu, pdf)
    circuit.add(rw_circuit(qu, angles))
    # Measurements used to verify the loaded probabilities.
    circuit.add(measure_probability(q))
    return circuit, (values, pdf)
def run_quantum_sim(qubits, circuit, shots):
    """Sample the distribution-loading circuit and return unary probabilities.

    Args:
        qubits (int): number of qubits in the unary register.
        circuit (Circuit): circuit built by :func:`load_quantum_sim`.
        shots (int): number of samples to draw.

    Returns:
        prob_sim (list): normalized probability of each unary outcome.
    """
    result = circuit(nshots=shots)
    histogram = result.frequencies(binary=True, registers=False)
    return extract_probability(qubits, histogram, shots)
def payoff_circuit(qubits, ancilla, K, S):
    """Encode the expected payoff into the ancilla excitation probability.

    Args:
        qubits (int): number of qubits in the unary register.
        ancilla (int): index of the payoff ancilla qubit.
        K (real): strike price.
        S (np.array): asset price of each unary basis state.

    Yields:
        controlled-RY gates rotating the ancilla in proportion to the
        (normalized) payoff of each in-the-money state.
    """
    # First unary state whose price exceeds the strike; defaults to the
    # last state when none does, matching the original scan-with-break.
    qK = next((i for i in range(qubits) if K < S[i]), qubits - 1)
    max_gain = S[qubits - 1] - K  # normalization so the largest payoff maps to angle pi
    for i in range(qK, qubits):
        theta = 2 * np.arcsin(np.sqrt((S[i] - K) / max_gain))
        yield gates.RY(ancilla, theta).controlled_by(i)
def payoff_circuit_inv(qubits, ancilla, K, S):
    """Inverse of :func:`payoff_circuit`, used during amplitude estimation.

    Args:
        qubits (int): number of qubits in the unary register.
        ancilla (int): index of the payoff ancilla qubit.
        K (real): strike price.
        S (np.array): asset price of each unary basis state.

    Yields:
        controlled-RY gates undoing the payoff encoding (negated angles).
    """
    # Same in-the-money scan as the forward circuit.
    qK = next((i for i in range(qubits) if K < S[i]), qubits - 1)
    max_gain = S[qubits - 1] - K
    for i in range(qK, qubits):
        theta = 2 * np.arcsin(np.sqrt((S[i] - K) / max_gain))
        yield gates.RY(ancilla, -theta).controlled_by(i)
def measure_payoff(q, ancilla):
    """Measurement gates over the unary register plus the payoff ancilla.

    Measuring the whole register enables post-selection on valid unary
    states when the payoff is read out.

    Args:
        q (list): indices of the unary-register qubits.
        ancilla (int): index of the payoff ancilla qubit.

    Yields:
        the measurement gate on q + [ancilla] (register name 'payoff').
    """
    yield gates.M(*(q+[ancilla]), register_name='payoff')
def load_payoff_quantum_sim(qubits, S0, sig, r, T, K):
    """Build the full pricing circuit: amplitude distributor + payoff encoder.

    Args:
        qubits (int): number of qubits in the unary register.
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.
        K (real): strike price.

    Returns:
        circuit (Circuit): circuit with distributor, payoff encoder and
            measurements on all wires.
        S (np.array): asset price of each unary basis state.
    """
    # Reuse get_pdf instead of re-deriving mu/mean/variance inline, so the
    # price grid and density are computed in exactly one place.
    S, ln = get_pdf(qubits, S0, sig, r, T)
    q, ancilla, circuit = create_qc(qubits)
    lognormal_parameters = rw_parameters(qubits, ln)
    circuit.add(rw_circuit(qubits, lognormal_parameters))
    circuit.add(payoff_circuit(qubits, ancilla, K, S))
    circuit.add(measure_payoff(q, ancilla))
    return circuit, S
def run_payoff_quantum_sim(qubits, circuit, shots, S, K):
    """Run the payoff circuit and estimate the option payoff with post-selection.

    Only shots whose unary register holds exactly one excitation are kept;
    the ancilla statistics of the surviving shots give the payoff.

    Args:
        qubits (int): number of qubits in the unary register.
        circuit (Circuit): circuit built by :func:`load_payoff_quantum_sim`.
        shots (int): number of shots to sample.
        S (np.array): asset price of each unary basis state.
        K (real): strike price.

    Returns:
        qu_payoff_sim (real): payoff estimated from the ancilla probability.
    """
    result = circuit(nshots=shots)
    histogram = result.frequencies(binary=True, registers=False)
    ones = 0
    zeroes = 0
    for bitstring, hits in histogram.items():
        # Post-selection: discard outcomes outside the unary subspace.
        if sum(int(b) for b in bitstring[:qubits]) != 1:
            continue
        if bitstring[qubits] == '0':
            zeroes += hits
        else:
            ones += hits
    # Rescale the ancilla excitation probability by the maximum payoff.
    return ones * (S[qubits - 1] - K) / (ones + zeroes)
def diff_qu_cl(qu_payoff_sim, cl_payoff):
    """Relative error between the quantum estimate and the classical payoff.

    Args:
        qu_payoff_sim (real): payoff estimated from the quantum circuit.
        cl_payoff (real): exact classical payoff.

    Returns:
        error (real): relative error in percent.
    """
    return 100 * np.abs(qu_payoff_sim - cl_payoff) / cl_payoff
def diffusion_operator(qubits):
    """Diffusion operator of the amplitude-estimation iteration.

    Implements a reflection using the extra qubit at index ``qubits``,
    controlled from the central qubit where the random walk starts.

    Args:
        qubits (int): number of qubits in the unary register (the extra
            qubit sits at this index).

    Yields:
        qibo gates implementing the diffusion operator.
    """
    # Floor division gives the walk's starting qubit for both parities.
    centre = qubits // 2
    yield gates.X(qubits)
    yield gates.H(qubits)
    yield gates.CNOT(centre, qubits)
    yield gates.H(qubits)
    yield gates.X(qubits)
def oracle_operator(qubits):
    """Oracle of the amplitude-estimation iteration.

    Flips the phase of the marked states via a Z gate on the ancilla
    qubit at index ``qubits``.

    Args:
        qubits (int): number of qubits used for the unary basis (the
            ancilla sits at this index).

    Yields:
        the Z gate acting on the ancilla qubit.
    """
    yield gates.Z(qubits)
def Q(qubits, ancilla, K, S, lognormal_parameters):
    """Grover-like operator of the amplitude-estimation algorithm.

    Composes oracle, inverse payoff encoder, inverse distributor,
    diffusion, distributor and payoff encoder.

    Args:
        qubits (int): number of qubits in the unary register.
        ancilla (int): index of the payoff ancilla qubit.
        K (real): strike price.
        S (np.array): asset price of each unary basis state.
        lognormal_parameters (list): fSim angles of the distributor.

    Yields:
        qibo gates implementing one application of the Q operator.
    """
    # `yield from` flattens the sub-generators so this generator yields
    # gates directly, like every other generator in this module (the
    # original yielded generator objects and relied on Circuit.add
    # unpacking nested iterables).
    yield from oracle_operator(qubits)
    yield from payoff_circuit_inv(qubits, ancilla, K, S)
    yield from rw_circuit_inv(qubits, lognormal_parameters, X=False)
    yield from diffusion_operator(qubits)
    yield from rw_circuit(qubits, lognormal_parameters, X=False)
    yield from payoff_circuit(qubits, ancilla, K, S)
def load_Q_operator(qubits, iterations, S0, sig, r, T, K):
    """Circuit for the m-th step of iterative amplitude estimation.

    Applies the amplitude distributor, the payoff encoder and
    ``iterations`` copies of the Q operator, then measures everything.

    Args:
        qubits (int): number of qubits in the unary register.
        iterations (int): number of consecutive applications of Q.
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.
        K (real): strike price.

    Returns:
        circuit (Circuit): circuit implementing the m = iterations step of
            iterative amplitude estimation.
    """
    iterations = int(iterations)
    # Reuse get_pdf instead of duplicating the mu/mean/variance derivation,
    # so the price grid is guaranteed to match the rest of the module.
    S, ln = get_pdf(qubits, S0, sig, r, T)
    lognormal_parameters = rw_parameters(qubits, ln)
    q, ancilla, circuit = create_qc(qubits)
    circuit.add(rw_circuit(qubits, lognormal_parameters))
    circuit.add(payoff_circuit(qubits, ancilla, K, S))
    for _ in range(iterations):
        circuit.add(Q(qubits, ancilla, K, S, lognormal_parameters))
    circuit.add(measure_payoff(q, ancilla))
    return circuit
def run_Q_operator(qubits, circuit, shots):
    """Run one amplitude-estimation step and post-select on unary states.

    Args:
        qubits (int): number of qubits in the unary register.
        circuit (Circuit): circuit built by :func:`load_Q_operator`.
        shots (int): number of shots for this AE step.

    Returns:
        ones (int): ancilla-excited counts after post-selection.
        zeroes (int): ancilla-ground counts after post-selection.
    """
    result = circuit(nshots=shots)
    histogram = result.frequencies(binary=True, registers=False)
    ones = 0
    zeroes = 0
    for bitstring, hits in histogram.items():
        # Keep only outcomes with exactly one excitation in the unary part.
        if sum(int(b) for b in bitstring[:qubits]) != 1:
            continue
        if bitstring[qubits] == '0':
            zeroes += hits
        else:
            ones += hits
    return ones, zeroes
def paint_prob_distribution(bins, prob_sim, S0, sig, r, T):
    """Plot measured unary probabilities against the target log-normal PDF.

    Args:
        bins (int): number of price bins (qubits).
        prob_sim (list): probabilities measured from the quantum circuit.
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.

    Side effects:
        Writes 'Probability_distribution.png' in the working directory.
    """
    mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
    mean = np.exp(mu + 0.5 * T * sig ** 2)
    variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
    S = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), bins)
    width = (S[1] - S[0]) / 1.2
    fig, ax = plt.subplots()
    ax.bar(S, prob_sim, width, label='Quantum', alpha=0.8)
    x = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), bins * 100)
    y = aux.log_normal(x, mu, sig * np.sqrt(T))
    # Rescale the continuous PDF so both curves enclose the same area.
    # np.trapz replaces scipy.integrate.trapz, which was removed in SciPy 1.14.
    y = y * np.trapz(prob_sim, S) / np.trapz(y, x)
    ax.plot(x, y, label='PDF', color='black')
    plt.ylabel('Probability')
    plt.xlabel('Option price')
    plt.title('Option price distribution for {} qubits '.format(bins))
    ax.legend()
    fig.tight_layout()
    fig.savefig('Probability_distribution.png')
def paint_AE(a, a_conf, bins, M, data, shots=10000, alpha = 0.05):
"""Visualization of the results of applying amplitude estimation to the option pricing algorithm.
Args:
a (np.array): estimated values for the probability of measuring the ancilla.
a_conf (np.array): errors on the estimation of the probability of measuring the ancilla.
bins (int): number of bins of precision.
M (int): total number of aplications of the Q operator.
data (tuple): data necessary to characterize the probability distribution.
shots (int): number of shots to be taken in intermediate steps of the AE algorithm.
alpha (real): confidence interval.
Returns:
images of the results and uncertainties of performing amplitude estimation up to M times in .png format.
"""
S0, sig, r, T, K = data
values, pdf = get_pdf(bins, S0, sig, r, T)
a_un = np.sum(pdf[values >= K] * (values[values >= K] - K))
cl_payoff = aux.classical_payoff(S0, sig, r, T, K, samples=1000000)
fig, ax = plt.subplots()
un_data = a * (values[bins - 1] - K)
un_conf = a_conf * (values[bins - 1] - K)
ax.scatter(np.arange(M + 1), un_data, c='C0', marker='x', zorder=10, label='Measurements')
ax.fill_between(np.arange(M + 1), un_data - un_conf, un_data + un_conf, color='C0', alpha=0.3)
ax.plot([0, M], [cl_payoff, cl_payoff], c='black', ls='--', label='Cl. payoff')
ax.plot([0, M], [a_un, a_un], c='blue', ls='--', label='Optimal approximation')
ax.set(ylim=[0.15, 0.17])
ax.legend()
fig.tight_layout()
fig.savefig('Amplitude_Estimation_Results.png')
from scipy.special import erfinv
z = erfinv(1 - alpha / 2)
fig, bx = plt.subplots()
bx.scatter(np.arange(M + 1), un_conf, c='C0', marker='x', zorder=10, label='Measurements')
a_max = (np.max(values) - K)
bound_down = np.sqrt(un_data) * np.sqrt(a_max - un_data) * z / np.sqrt(shots) / np.cumsum(
1 + 2 * ( | np.arange(M + 1) | numpy.arange |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyscf import lib
from pyscf import ao2mo
from pyscf.cc.rintermediates import _get_vvvv # noqa
from pyscf.cc.ccsd import BLKMIN
# Ref: Gauss and Stanton, J. Chem. Phys. 103, 3561 (1995) Table III
def make_tau(t2, t1, r1, fac=1, out=None):
    """Tau amplitudes (t2 plus the scaled disconnected t1*r1 part) for all
    three spin blocks of UCCSD.

    Args:
        t2: tuple (t2aa, t2ab, t2bb) of double-excitation amplitudes.
        t1: tuple (t1a, t1b) of single-excitation amplitudes.
        r1: tuple (r1a, r1b) of a second set of singles amplitudes.
        fac: scale factor for the disconnected contribution.
        out: unused, kept for interface compatibility.

    Returns:
        (tau_aa, tau_ab, tau_bb) arrays.
    """
    aa = make_tau_aa(t2[0], t1[0], r1[0], fac, out)
    bb = make_tau_aa(t2[2], t1[1], r1[1], fac, out)
    ab = make_tau_ab(t2[1], t1, r1, fac, out)
    return aa, ab, bb
def make_tau_aa(t2aa, t1a, r1a, fac=1, out=None):
    """Same-spin tau: t2aa plus the antisymmetrized disconnected t1*r1 term.

    Args:
        t2aa: same-spin doubles amplitudes, shape (nocc, nocc, nvir, nvir).
        t1a, r1a: singles amplitudes, shape (nocc, nvir).
        fac: scale factor for the disconnected contribution.
        out: unused, kept for interface compatibility.

    Returns:
        tau array, antisymmetric in (i,j) and (a,b) for the added part.
    """
    outer = np.einsum('ia,jb->ijab', t1a, r1a)
    # Antisymmetrize in the occupied pair, then in the virtual pair.
    tau = outer - outer.transpose(1, 0, 2, 3)
    tau = tau - tau.transpose(0, 1, 3, 2)
    return t2aa + (0.5 * fac) * tau
def make_tau_ab(t2ab, t1, r1, fac=1, out=None):
    """Opposite-spin tau: t2ab plus the symmetrized disconnected t1*r1 term.

    Args:
        t2ab: opposite-spin doubles, shape (nocca, noccb, nvira, nvirb).
        t1: tuple (t1a, t1b) of singles amplitudes.
        r1: tuple (r1a, r1b) of a second set of singles amplitudes.
        fac: scale factor for the disconnected contribution.
        out: unused, kept for interface compatibility.

    Returns:
        tau array with the same shape as t2ab.
    """
    t1a, t1b = t1
    r1a, r1b = r1
    cross = np.einsum('ia,jb->ijab', t1a, r1b) + np.einsum('ia,jb->ijab', r1a, t1b)
    return t2ab + (0.5 * fac) * cross
def Foo(t1, t2, eris):
    """Occupied-occupied one-particle intermediate F_mi for UCCSD.

    Gauss & Stanton, JCP 103, 3561 (1995), Table III, spin-adapted to
    alpha/beta blocks.

    Args:
        t1: tuple (t1a, t1b) of single-excitation amplitudes.
        t2: tuple (t2aa, t2ab, t2bb) of double-excitation amplitudes.
        eris: integral object exposing MO-basis ERI blocks (ovov, OVOV,
            ovOV, ovoo, ...) and Fock matrices focka/fockb.

    Returns:
        (Fooa, Foob): alpha and beta occupied-occupied blocks.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nocca, noccb, nvira, nvirb = t2ab.shape
    eris_ovov = np.asarray(eris.ovov)
    eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)
    # tau-tilde: t2 plus half of the disconnected t1*t1 contribution.
    tilaa, tilab, tilbb = make_tau(t2, t1, t1, fac=0.5)
    Fooa = lib.einsum('inef,menf->mi', tilaa, eris_ovov)
    Fooa += lib.einsum('iNeF,meNF->mi', tilab, eris_ovOV)
    Foob = lib.einsum('inef,menf->mi', tilbb, eris_OVOV)
    Foob += lib.einsum('nIfE,nfME->MI', tilab, eris_ovOV)
    eris_ovoo = np.asarray(eris.ovoo)
    eris_OVOO = np.asarray(eris.OVOO)
    eris_OVoo = np.asarray(eris.OVoo)
    eris_ovOO = np.asarray(eris.ovOO)
    # Antisymmetrized same-spin ovoo contractions plus the opposite-spin term.
    Fooa += np.einsum('ne,nemi->mi', t1a, eris_ovoo)
    Fooa -= np.einsum('ne,meni->mi', t1a, eris_ovoo)
    Fooa += np.einsum('NE,NEmi->mi', t1b, eris_OVoo)
    Foob += np.einsum('ne,nemi->mi', t1b, eris_OVOO)
    Foob -= np.einsum('ne,meni->mi', t1b, eris_OVOO)
    Foob += np.einsum('ne,neMI->MI', t1a, eris_ovOO)
    fooa = eris.focka[:nocca,:nocca]
    foob = eris.fockb[:noccb,:noccb]
    fova = eris.focka[:nocca,nocca:]
    fovb = eris.fockb[:noccb,noccb:]
    Fova, Fovb = Fov(t1, t2, eris)
    # Fock contribution; the 0.5*(Fov+fov)*t1 term folds the singles into
    # the effective one-particle operator.
    Fooa += fooa + 0.5*lib.einsum('me,ie->mi', Fova+fova, t1a)
    Foob += foob + 0.5*lib.einsum('me,ie->mi', Fovb+fovb, t1b)
    return Fooa, Foob
def Fvv(t1, t2, eris):
    """Virtual-virtual one-particle intermediate F_ae for UCCSD.

    Gauss & Stanton, JCP 103, 3561 (1995), Table III, spin-adapted.  The
    ovvv-type integral blocks are consumed in occupied-orbital slices to
    bound peak memory; the `#:` comments show the dense reference
    implementation of each blocked loop.

    Args:
        t1: tuple (t1a, t1b) of single-excitation amplitudes.
        t2: tuple (t2aa, t2ab, t2bb) of double-excitation amplitudes.
        eris: integral object exposing MO-basis ERI blocks, the blocked
            accessors get_ovvv/get_OVVV/get_ovVV/get_OVvv, and Fock
            matrices focka/fockb.

    Returns:
        (Fvva, Fvvb): alpha and beta virtual-virtual blocks.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nocca, noccb, nvira, nvirb = t2ab.shape
    Fvva = 0
    Fvvb = 0
    # NOTE: the original code also computed `make_tau(t2, t1, t1)` here,
    # but the result was never used in the active body (it is referenced
    # only by the `#:` reference comments), so the dead call was removed.
    mem_now = lib.current_memory()[0]
    max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
    #:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
    #:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
    #:self.Fvva = np.einsum('mf,mfae->ae', t1a, ovvv)
    # Slice size chosen so a few nvira**3 work arrays fit in memory.
    blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
    for p0,p1 in lib.prange(0, nocca, blksize):
        ovvv = eris.get_ovvv(slice(p0,p1))  # ovvv = eris.ovvv[p0:p1]
        ovvv = ovvv - ovvv.transpose(0,3,2,1)
        Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
        ovvv = None
    #:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
    #:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
    #:self.Fvvb = np.einsum('mf,mfae->ae', t1b, OVVV)
    blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
    for p0, p1 in lib.prange(0, noccb, blksize):
        OVVV = eris.get_OVVV(slice(p0,p1))  # OVVV = eris.OVVV[p0:p1]
        OVVV = OVVV - OVVV.transpose(0,3,2,1)
        Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
        OVVV = None
    #:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
    #:self.Fvvb += np.einsum('mf,mfAE->AE', t1a, eris_ovVV)
    blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
    for p0,p1 in lib.prange(0, nocca, blksize):
        ovVV = eris.get_ovVV(slice(p0,p1))  # ovVV = eris.ovVV[p0:p1]
        Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
        ovVV = None
    #:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
    #:self.Fvva += np.einsum('MF,MFae->ae', t1b, eris_OVvv)
    blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
    for p0, p1 in lib.prange(0, noccb, blksize):
        OVvv = eris.get_OVvv(slice(p0,p1))  # OVvv = eris.OVvv[p0:p1]
        Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
        OVvv = None
    eris_ovov = np.asarray(eris.ovov)
    eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)
    # tau-tilde: t2 plus half of the disconnected t1*t1 contribution.
    tilaa, tilab, tilbb = make_tau(t2, t1, t1, fac=0.5)
    Fvva -= lib.einsum('mnaf,menf->ae', tilaa, eris_ovov)
    Fvva -= lib.einsum('mNaF,meNF->ae', tilab, eris_ovOV)
    Fvvb -= lib.einsum('mnaf,menf->ae', tilbb, eris_OVOV)
    Fvvb -= lib.einsum('nMfA,nfME->AE', tilab, eris_ovOV)
    fvva = eris.focka[nocca:,nocca:]
    fvvb = eris.fockb[noccb:,noccb:]
    fova = eris.focka[:nocca,nocca:]
    fovb = eris.fockb[:noccb,noccb:]
    Fova, Fovb = Fov(t1, t2, eris)
    # Fock contribution with the singles folded into the effective operator.
    Fvva += fvva - 0.5*lib.einsum('me,ma->ae', Fova+fova, t1a)
    Fvvb += fvvb - 0.5*lib.einsum('me,ma->ae', Fovb+fovb, t1b)
    return Fvva, Fvvb
def Fov(t1, t2, eris):
    """Occupied-virtual one-particle intermediate F_me for UCCSD.

    Gauss & Stanton, JCP 103, 3561 (1995), Table III, spin-adapted.

    Args:
        t1: tuple (t1a, t1b) of single-excitation amplitudes.
        t2: tuple (t2aa, t2ab, t2bb); only the shape of t2ab is used here.
        eris: integral object exposing ovov/OVOV/ovOV blocks and the
            Fock matrices focka/fockb.

    Returns:
        (Fova, Fovb): alpha and beta occupied-virtual blocks.
    """
    t1a, t1b = t1
    nocca, noccb, nvira, nvirb = t2[1].shape
    # Antisymmetrized same-spin integrals <me||nf>.
    ovov_anti = np.asarray(eris.ovov)
    ovov_anti = ovov_anti - ovov_anti.transpose(0,3,2,1)
    OVOV_anti = np.asarray(eris.OVOV)
    OVOV_anti = OVOV_anti - OVOV_anti.transpose(0,3,2,1)
    ovOV = np.asarray(eris.ovOV)
    Fova = (np.einsum('nf,menf->me', t1a, ovov_anti)
            + np.einsum('NF,meNF->me', t1b, ovOV)
            + eris.focka[:nocca,nocca:])
    Fovb = (np.einsum('nf,menf->me', t1b, OVOV_anti)
            + np.einsum('nf,nfME->ME', t1a, ovOV)
            + eris.fockb[:noccb,noccb:])
    return Fova, Fovb
def Woooo(t1, t2, eris):
    """Occupied four-index intermediate W_minj for UCCSD.

    Gauss & Stanton, JCP 103, 3561 (1995), Table III, spin-adapted.

    Args:
        t1: tuple (t1a, t1b) of single-excitation amplitudes.
        t2: tuple (t2aa, t2ab, t2bb) of double-excitation amplitudes.
        eris: integral object exposing MO-basis ERI blocks.

    Returns:
        (woooo, wooOO, wOOoo, wOOOO): aaaa, aabb, bbaa and bbbb blocks;
        wOOoo is returned as None because it is obtainable from wooOO by
        permutational symmetry.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    eris_ovoo = np.asarray(eris.ovoo)
    eris_OVOO = np.asarray(eris.OVOO)
    eris_OVoo = np.asarray(eris.OVoo)
    eris_ovOO = np.asarray(eris.ovOO)
    # Antisymmetrize the same-spin ovoo blocks before contracting with t1.
    ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
    OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
    woooo = lib.einsum('je,nemi->minj', t1a, ovoo)
    wOOOO = lib.einsum('je,nemi->minj', t1b, OVOO)
    wooOO = lib.einsum('JE,NEmi->miNJ', t1b, eris_OVoo)
    # Minus sign carried through the t1a contraction for the mixed block.
    woOOo = lib.einsum('je,meNI->mINj',-t1a, eris_ovOO)
    woooo += np.asarray(eris.oooo)
    wOOOO += np.asarray(eris.OOOO)
    wooOO += np.asarray(eris.ooOO)
    # Antisymmetrize in the (i,j) index pair.
    woooo = woooo - woooo.transpose(0,3,2,1)
    wOOOO = wOOOO - wOOOO.transpose(0,3,2,1)
    wooOO = wooOO - woOOo.transpose(0,3,2,1)
    eris_ovov = np.asarray(eris.ovov)
    eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)
    ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
    OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
    # tau = t2 plus the full disconnected t1*t1 contribution.
    tauaa, tauab, taubb = make_tau(t2, t1, t1)
    woooo += 0.5*lib.einsum('ijef,menf->minj', tauaa, ovov)
    wOOOO += 0.5*lib.einsum('ijef,menf->minj', taubb, OVOV)
    wooOO += lib.einsum('iJeF,meNF->miNJ', tauab, eris_ovOV)
    wOOoo = None
    return woooo, wooOO, wOOoo, wOOOO
def Wooov(t1, t2, eris):
    """Occupied-occupied-occupied-virtual intermediate W_mine for UCCSD.

    Gauss & Stanton, JCP 103, 3561 (1995), Table III, spin-adapted.

    Args:
        t1: tuple (t1a, t1b) of single-excitation amplitudes.
        t2: tuple (t2aa, t2ab, t2bb); used here only to fix the result dtype.
        eris: integral object exposing MO-basis ERI blocks.

    Returns:
        (wooov, wooOV, wOOov, wOOOV): aaaa, aabb, bbaa and bbbb blocks.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    # Promote to a common dtype so complex amplitudes are supported.
    dtype = np.result_type(t1a, t1b, t2aa, t2ab, t2bb)
    eris_ovoo = np.asarray(eris.ovoo)
    eris_OVOO = np.asarray(eris.OVOO)
    eris_OVoo = np.asarray(eris.OVoo)
    eris_ovOO = np.asarray(eris.ovOO)
    # Antisymmetrized same-spin ovoo blocks.
    ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
    OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
    # Reorder to the (m,i,n,e) index convention of the intermediate.
    wooov = np.array(     ovoo.transpose(2,3,0,1), dtype=dtype)
    wOOOV = np.array(     OVOO.transpose(2,3,0,1), dtype=dtype)
    wooOV = np.array(eris_OVoo.transpose(2,3,0,1), dtype=dtype)
    wOOov = np.array(eris_ovOO.transpose(2,3,0,1), dtype=dtype)
    eris_ovoo = eris_OVOO = eris_ovOO = eris_OVoo = None  # free memory early
    eris_ovov = np.asarray(eris.ovov)
    eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)
    ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
    OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
    # Singles dressing of the ovov blocks.
    wooov += lib.einsum('if,mfne->mine', t1a, ovov)
    wOOOV += lib.einsum('if,mfne->mine', t1b, OVOV)
    wooOV += lib.einsum('if,mfNE->miNE', t1a, eris_ovOV)
    wOOov += lib.einsum('IF,neMF->MIne', t1b, eris_ovOV)
    return wooov, wooOV, wOOov, wOOOV
def Woovo(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
dtype = np.result_type(t1a, t1b, t2aa, t2ab, t2bb)
wovoo = np.zeros((nocca,nvira,nocca,nocca), dtype=dtype)
wOVOO = np.zeros((noccb,nvirb,noccb,noccb), dtype=dtype)
woVoO = np.zeros((nocca,nvirb,nocca,noccb), dtype=dtype)
wOvOo = np.zeros((noccb,nvira,noccb,nocca), dtype=dtype)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.wovoo = 0.5 * lib.einsum('mebf,ijef->mbij', eris_ovvv, tauaa)
#:self.wovoo -= 0.5 * lib.einsum('mfbe,ijef->mbij', eris_ovvv, tauaa)
mem_now = lib.current_memory()[0]
max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
wovoo[p0:p1] = 0.5 * lib.einsum('mebf,ijef->mbij', ovvv, tauaa)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.wOVOO = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
wOVOO[p0:p1] = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.Fvvb += np.einsum('mf,mfAE->AE', t1a, eris_ovVV)
#:self.woVvO = lib.einsum('JF,meBF->mBeJ', t1b, eris_ovVV)
#:self.woVVo = lib.einsum('jf,mfBE->mBEj',-t1a, eris_ovVV)
#:self.woVoO = 0.5 * lib.einsum('meBF,iJeF->mBiJ', eris_ovVV, tauab)
#:self.woVoO += 0.5 * lib.einsum('mfBE,iJfE->mBiJ', eris_ovVV, tauab)
blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
woVoO[p0:p1] = 0.5 * lib.einsum('meBF,iJeF->mBiJ', ovVV, tauab)
woVoO[p0:p1]+= 0.5 * lib.einsum('mfBE,iJfE->mBiJ', ovVV, tauab)
ovVV = None
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.Fvva += np.einsum('MF,MFae->ae', t1b, eris_OVvv)
#:self.wOvVo = lib.einsum('jf,MEbf->MbEj', t1a, eris_OVvv)
#:self.wOvvO = lib.einsum('JF,MFbe->MbeJ',-t1b, eris_OVvv)
#:self.wOvOo = 0.5 * lib.einsum('MEbf,jIfE->MbIj', eris_OVvv, tauab)
#:self.wOvOo += 0.5 * lib.einsum('MFbe,jIeF->MbIj', eris_OVvv, tauab)
blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
wOvOo[p0:p1] = 0.5 * lib.einsum('MEbf,jIfE->MbIj', OVvv, tauab)
wOvOo[p0:p1]+= 0.5 * lib.einsum('MFbe,jIeF->MbIj', OVvv, tauab)
OVvv = None
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
tilaa, tilab, tilbb = make_tau(t2, t1, t1, fac=0.5)
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
tmpaa = lib.einsum('nemi,jnbe->mbij', ovoo, t2aa)
tmpaa+= lib.einsum('NEmi,jNbE->mbij', eris_OVoo, t2ab)
tmpbb = lib.einsum('nemi,jnbe->mbij', OVOO, t2bb)
tmpbb+= lib.einsum('neMI,nJeB->MBIJ', eris_ovOO, t2ab)
woVoO += lib.einsum('nemi,nJeB->mBiJ', ovoo, t2ab)
woVoO += lib.einsum('NEmi,JNBE->mBiJ', eris_OVoo, t2bb)
woVoO -= lib.einsum('meNI,jNeB->mBjI', eris_ovOO, t2ab)
wOvOo += lib.einsum('NEMI,jNbE->MbIj', OVOO, t2ab)
wOvOo += lib.einsum('neMI,jnbe->MbIj', eris_ovOO, t2aa)
wOvOo -= lib.einsum('MEni,nJbE->MbJi', eris_OVoo, t2ab)
wovoo += tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO += tmpbb - tmpbb.transpose(0,1,3,2)
tmp1aa = lib.einsum('njbf,menf->mbej', t2aa, ovov)
tmp1aa-= lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
tmp1bb = lib.einsum('njbf,menf->mbej', t2bb, OVOV)
tmp1bb-= lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
tmp1ab = lib.einsum('NJBF,meNF->mBeJ', t2bb, eris_ovOV)
tmp1ab-= lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmp1ba = lib.einsum('njbf,nfME->MbEj', t2aa, eris_ovOV)
tmp1ba-= lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmp1abba =-lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
tmp1baab =-lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpaa = lib.einsum('ie,mbej->mbij', t1a, tmp1aa)
tmpbb = lib.einsum('ie,mbej->mbij', t1b, tmp1bb)
tmpab = lib.einsum('ie,mBeJ->mBiJ', t1a, tmp1ab)
tmpab-= lib.einsum('IE,mBEj->mBjI', t1b, tmp1abba)
tmpba = lib.einsum('IE,MbEj->MbIj', t1b, tmp1ba)
tmpba-= lib.einsum('ie,MbeJ->MbJi', t1a, tmp1baab)
wovoo -= tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO -= tmpbb - tmpbb.transpose(0,1,3,2)
woVoO -= tmpab
wOvOo -= tmpba
eris_ovov = eris_OVOV = eris_ovOV = None
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_ovOO = np.asarray(eris.ovOO)
eris_OVoo = np.asarray(eris.OVoo)
wovoo += eris_ovoo.transpose(3,1,2,0) - eris_ovoo.transpose(2,1,0,3)
wOVOO += eris_OVOO.transpose(3,1,2,0) - eris_OVOO.transpose(2,1,0,3)
woVoO += eris_OVoo.transpose(3,1,2,0)
wOvOo += eris_ovOO.transpose(3,1,2,0)
eris_ovoo = eris_OVOO = eris_ovOO = eris_OVoo = None
eris_ovvo = np.asarray(eris.ovvo)
eris_OVVO = np.asarray(eris.OVVO)
eris_OVvo = | np.asarray(eris.OVvo) | numpy.asarray |
"""Analysis tab"""
from sample.widgets import responsive as tk, pyplot, utils, audio, logging
from sample import plots
from tkinter import messagebox, filedialog
import numpy as np
import json
import os
class AnalysisTab(utils.DataOnRootMixin, tk.Frame):
"""Tab for SAMPLE analysis
Args:
pad_top_w (float): Padding for top axes width (as a fraction of the
whole figure)
pad_top_h (float): Padding for top axes height (as a fraction of the
whole figure)
pad_bottom_w (float): Padding for bottom axis width (as a fraction
of the whole figure)
pad_bottom_h (float): Padding for bottom axis height (as a fraction
of the whole figure)
args: Positional arguments for :class:`tkinter.ttk.Frame`
kwargs: Keyword arguments for :class:`tkinter.ttk.Frame`"""
  def __init__(
      self,
      *args,
      pad_top_w: float = 0.09,
      pad_top_wm: float = 0.02,
      pad_top_h: float = 0.02,
      pad_bottom_w: float = 0.05,
      pad_bottom_h: float = 0.05,
      **kwargs
  ):
    """Build all widgets of the analysis tab.

    Layout (top to bottom): a pyplot figure with three axes (two
    side-by-side on top, one spanning the bottom), a progress bar,
    and a row with the Analyze / Play Original / Play Resynthesis /
    Export buttons.

    NOTE(review): ``pad_top_wm`` appears to be the horizontal gap between
    the two top axes, as a fraction of figure width — it is not listed in
    the class docstring; confirm and document there.
    """
    super().__init__(*args, **kwargs)
    # Directory of the last save dialog (presumably set by export_cbk) — TODO confirm
    self.filedialog_dir_save = None
    self.responsive(1, 1)
    # --- Pyplot widgets -----------------------------------------------------
    self.plt = pyplot.PyplotFrame(self)
    self.plt.grid(row=0)
    # Width/height of each of the two top axes, in figure fractions
    # (the two axes plus paddings share the full width).
    top_width = 0.5 - pad_top_w - 0.5 * pad_top_wm
    top_height = 0.5 - 2 * pad_top_h
    self.ax = (
        # ax[0]: top-left (STFT image and tracks, see update_plot)
        self.plt.fig.add_axes((
            pad_top_w,
            0.5 + pad_top_h,
            top_width,
            top_height,
        )),
        # ax[1]: top-right (magnitude plot; labelled on its right side)
        self.plt.fig.add_axes((
            0.50 + pad_top_wm / 2,
            0.50 + pad_top_h,
            top_width,
            top_height,
        )),
        # ax[2]: bottom axis spanning (almost) the full figure width
        self.plt.fig.add_axes((
            pad_bottom_w,
            pad_bottom_h,
            1 - 2 * pad_bottom_w,
            0.5 - 2 * pad_bottom_h,
        )),
    )
    # Blend the figure background into the ttk theme colour, when available.
    fc = utils.root_color(self, "TLabel", "background")
    if fc is not None:
      self.plt.fig.set_facecolor(fc)
    # Start with bare axes (no frame, no ticks) until there is data to show.
    for ax in self.ax:
      ax.set_frame_on(False)
      ax.set_xticks(())
      ax.set_yticks(())
    # ------------------------------------------------------------------------
    self.progressbar = tk.Progressbar(self, maximum=1, value=0)
    self.progressbar.grid(row=1)
    self.bottom_row = tk.Frame(self)
    self.bottom_row.grid(row=2)
    self.bottom_row.responsive(1, 4)
    # Analysis button
    self.analysis_button = tk.Button(self.bottom_row, text="Analyze")
    self.analysis_button.bind("<Button-1>", self.analysis_cbk)
    self.analysis_button.grid(column=0, row=0)
    # Audio play buttons (True -> original signal, False -> resynthesis)
    self._tmp_audio = None
    self.play_button_o = tk.Button(self.bottom_row, text="Play Original")
    self.play_button_o.grid(column=1, row=0)
    self.play_button_o.bind("<Button-1>", self.play_cbk(True))
    self.play_button_r = tk.Button(self.bottom_row, text="Play Resynthesis")
    self.play_button_r.grid(column=2, row=0)
    self.play_button_r.bind("<Button-1>", self.play_cbk(False))
    # Export button
    self.export_button = tk.Button(self.bottom_row, text="Export")
    self.export_button.bind("<Button-1>", self.export_cbk)
    self.export_button.grid(column=3, row=0)
def update_plot(self):
"""Update analysis and resynthesis figure"""
for ax in self.ax:
ax.clear()
m = self.sample_object.sinusoidal_model
stft = np.array([mx for mx, _ in m.intermediate_["stft"]]).T
if m.reverse:
stft = np.fliplr(stft)
tmax = len(m.intermediate_["stft"]) * m.h / m.fs
plots.sine_tracking_2d(m, ax=self.ax)
if tmax > 0:
xlim = (0, tmax)
else:
xlim = self.ax[0].get_xlim()
ylim = self.ax[0].get_ylim()
self.ax[0].imshow(
stft, cmap="Greys",
origin="lower", aspect="auto",
extent=(*xlim, 0, m.fs/2),
)
self.ax[0].set_ylim(ylim)
self.ax[0].set_xlim(xlim)
self.ax[0].grid(False)
self.ax[0].set_title("")
self.ax[0].set_ylabel("frequency (Hz)")
self.ax[1].set_title("")
self.ax[1].set_ylabel("magnitude (dB)")
self.ax[1].yaxis.tick_right()
self.ax[1].yaxis.set_label_position("right")
x = self.audio_x[self.audio_trim_start:self.audio_trim_stop]
t = | np.arange(x.size) | numpy.arange |
#!/usr/bin/env python
# coding: utf-8
# Import libraries
import numpy as np
import os , csv
from os import listdir
import matplotlib.pyplot as plt
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.interpolate import interp1d
from sklearn.decomposition import TruncatedSVD
# Load desired data from 1 session 1 animal
# Note that there are 340 trials in 1 session
# (For more info https://github.com/nsteinme/steinmetz-et-al-2019/wiki/data-files)
'''
data_path = '/Users/xinweichia/Documents/connected_lizards/Steinmetz_dataset/Richards_2017-10-31'
trials_intervals = np.load(data_path+'/'+'trials.intervals.npy') # in seconds
spike_times = np.load(data_path+'/'+'spikes.times.npy') * 1000 # Unbinned spike times in ms
trials_gocue_times = np.load(data_path+'/'+'trials.goCue_times.npy')
trials_response_choice = np.load(data_path+'/'+'trials.response_choice.npy') # -1 left, 1, right, 0 no response
spontaneous_intervals = np.load(data_path+'/'+'spontaneous.intervals.npy')
trials_response_time = np.load(data_path+'/'+'trials.response_times.npy')
spike_clusters = np.load(data_path+'/'+'spikes.clusters.npy')
site_positions = np.load(data_path+'/'+'channels.sitePositions.npy')
clusters_depths = np.load(data_path+'/'+'clusters.depths.npy')
clusters_annotation = np.load(data_path+'/'+'clusters._phy_annotation.npy')
channel_sites = np.load(data_path+'/'+'channels.site.npy')
channels_brainlocation = pd.read_csv(data_path+'/'+'channels.brainLocation.tsv', sep='\t')
clusters_probes = np.load(data_path+'/'+'clusters.probes.npy')
channels_probe = np.load(data_path+'/'+'channels.probe.npy')
trials_visual_time = np.load(data_path+'/'+'trials.visualStim_times.npy')
visual_times = trials_visual_time
# Behaviour data
wheel_movement = np.load(data_path+'/'+'wheelMoves.type.npy')
wheel_intervals = np.load(data_path+'/'+'wheelMoves.intervals.npy')
'''
# Taken from https://github.com/MouseLand/steinmetz2019_NMA/blob/master/steinmetz_loader.py
# To obtain brain regions
def get_good_cells(fdirpath): #
    """Load cluster quality and anatomical location for one session.

    Args:
        fdirpath: directory with the session files
            (``clusters._phy_annotation.npy``, ``clusters.peakChannel.npy``,
            ``channels.brainLocation.tsv``).

    Returns:
        good_cells: boolean array, True for clusters rated >= 2 ("good")
            whose peak channel has an entry in the brain-location table.
        brain_region: array with the area label of each cluster's peak
            channel (NOTE: indexes ``br`` for *all* clusters, so an
            out-of-range peak channel would raise IndexError here, as in
            the original implementation).
        br: array with the area label of each recording channel.
    """
    # location in brain of each neuron
    brain_loc = os.path.join(fdirpath, "channels.brainLocation.tsv")
    good_cells = (np.load(os.path.join(fdirpath, "clusters._phy_annotation.npy")) >= 2 ).flatten()
    # peakChannel is 1-based; shift to 0-based channel indices.
    clust_channel = np.load(os.path.join(fdirpath, "clusters.peakChannel.npy")).astype(int) - 1
    with open(brain_loc, 'r') as tsv:
        tsvin = csv.reader(tsv, delimiter="\t")
        next(tsvin, None)  # skip the header row
        # The last column of each row holds the area label.
        br = np.array([row[-1] for row in tsvin])
    # Exclude clusters whose peak channel lies outside the location table.
    good_cells = np.logical_and(good_cells, clust_channel.flatten() < len(br))
    brain_region = br[clust_channel[:, 0]]
    return good_cells, brain_region, br
#good_cells, brain_regions ,br = get_good_cells(data_path) # Get brain regions
#EDIT Add cluster annotation, spike_clusters
def bin_spikes(spike_times, spike_clusters, clusters_annotation, bin_size=10):
    """Group spike times by cluster and count them in fixed-width bins.

    Args:
        spike_times: 1-D array of spike times (ms).
        spike_clusters: 1-D array with the cluster id of each spike.
        clusters_annotation: per-cluster quality annotation; only its
            length is used here (to size the per-cell container).
        bin_size: bin width in the same unit as ``spike_times``.

    Returns:
        per-cell binned counts, per-cell (unbinned) spike times, and the
        index of the cell with the largest total spike count.
    """
    n_clusters = len(np.unique(spike_clusters))
    # One entry per cluster: the (unbinned) spike times of that cell.
    spike_time_cells = np.empty(len(clusters_annotation), dtype=object)
    for cell in range(n_clusters):
        spike_time_cells[cell] = spike_times[np.where(spike_clusters == cell)[0]]
    # Histogram each cell's spikes; bin edges run from 0 up to the cell's
    # last spike time in steps of bin_size.
    spike_time_binned = np.empty(n_clusters, dtype=object)
    sum_spikes = np.empty(n_clusters, dtype=object)
    for cell in range(len(spike_time_cells)):
        edges = np.arange(0, np.floor(spike_time_cells[cell][-1]), bin_size)
        counts, _ = np.histogram(spike_time_cells[cell], bins=edges)
        spike_time_binned[cell] = counts
        sum_spikes[cell] = np.sum(counts)
    # Most active cell (useful for plotting).
    cell_spikes_max = np.argmax(sum_spikes)
    return spike_time_binned, spike_time_cells, cell_spikes_max
#spike_time_binned, spike_time_cells, cell_spikes_max = bin_spikes(spike_times,10)
# Sort cells into trial types and relevant epoch
# (Need to change the sorting into matrices rather than vectors)
#EDIT Add Trials Intervals
def sort_cells_trials(spike_time_binned,spike_time_cells, trials_intervals,trials_visual_time,epoch_duration = 400 , bin_size = 10):
# Epoch duration is defined as the period after the visual stimulus
# Sort into trials
spike_time_binned_trial = np.empty(len(spike_time_binned), dtype=object)
spike_time_binned_trial_response = np.empty(len(spike_time_binned), dtype=object)
for cell_num in np.arange(len(spike_time_binned)):
spike_time_binned_trial[cell_num] = np.empty(len(trials_intervals), dtype=object)
spike_time_binned_trial_response[cell_num] = np.empty(len(trials_intervals), dtype=object)
for i,trials_start_end in enumerate(trials_intervals):
# Sort spikes into their trial numbers.
spike_time_binned_trial[cell_num][i] = spike_time_binned[cell_num][ int(np.floor(trials_start_end[0]*(1000/bin_size))) : int(np.floor(trials_start_end[1]*(1000/bin_size)))]
# Using visual onset to splice a trial into visual onset : visual onset +400ms
spike_time_binned_trial_response[cell_num][i] = spike_time_binned[cell_num][(int(np.floor(trials_visual_time[i]*(1000/bin_size)))) : (int( | np.floor(trials_visual_time[i]*(1000/bin_size)) | numpy.floor |
import utm as UTM
import unittest
import numpy as np
class UTMTestCase(unittest.TestCase):
    """Base class providing tolerant tuple comparisons for UTM tests."""

    def assert_utm_equal(self, a, b):
        # Compare (easting, northing, zone number, zone letter) tuples;
        # coordinates numerically, the zone letter case-insensitively.
        for got, want in zip(a[:2], b[:2]):
            self.assertTrue(np.allclose(got, want))
        self.assertEqual(a[2], b[2])
        self.assertEqual(a[3].upper(), b[3].upper())

    def assert_latlon_equal(self, a, b):
        # Latitude and longitude must agree to ~1e-4 (absolute and relative).
        for got, want in zip(a[:2], b[:2]):
            self.assertTrue(np.allclose(got, want, rtol=1e-4, atol=1e-4))
class KnownValues(UTMTestCase):
    """Conversions checked against independently known reference points."""

    # Each entry: (lat, lon), (easting, northing, zone number, zone letter),
    # and extra keyword arguments for the inverse conversion.
    known_values = [
        # Aachen, Germany
        (
            (50.77535, 6.08389),
            (294409, 5628898, 32, 'U'),
            {'northern': True},
        ),
        # New York, USA
        (
            (40.71435, -74.00597),
            (583960, 4507523, 18, 'T'),
            {'northern': True},
        ),
        # Wellington, New Zealand
        (
            (-41.28646, 174.77624),
            (313784, 5427057, 60, 'G'),
            {'northern': False},
        ),
        # Capetown, South Africa
        (
            (-33.92487, 18.42406),
            (261878, 6243186, 34, 'H'),
            {'northern': False},
        ),
        # Mendoza, Argentina
        (
            (-32.89018, -68.84405),
            (514586, 6360877, 19, 'h'),
            {'northern': False},
        ),
        # Fairbanks, Alaska, USA
        (
            (64.83778, -147.71639),
            (466013, 7190568, 6, 'W'),
            {'northern': True},
        ),
        # Scottish Highlands (Ben Nevis area), UK
        (
            (56.79680, -5.00601),
            (377486, 6296562, 30, 'V'),
            {'northern': True},
        ),
        # Latitude 84
        (
            (84, -5.00601),
            (476594, 9328501, 30, 'X'),
            {'northern': True},
        ),
    ]

    def test_from_latlon(self):
        # Vectorized conversion of three points, all falling in zone 31N.
        result = UTM.from_latlon(np.array([0.0, 3.0, 6.0]),
                                 np.array([0.0, 1.0, 3.4]))
        self.assert_utm_equal((np.array([166021.44317933032,
                                         277707.83075574087,
                                         544268.12794623]),
                               np.array([0.0,
                                         331796.29167519242,
                                         663220.7198366751]),
                               31, 'N'), result)
        # Scalar reference coordinates, passed as length-1 arrays.
        for latlon, utm, _ in self.known_values:
            lat, lon = (np.array([v]) for v in latlon)
            self.assert_utm_equal(utm, UTM.from_latlon(lat, lon))

    def test_to_latlon(self):
        # Vectorized inverse conversion of the same three zone-31N points.
        result = UTM.to_latlon(np.array([166021.44317933032,
                                         277707.83075574087,
                                         544268.12794623]),
                               np.array([0.0,
                                         331796.29167519242,
                                         663220.7198366751]),
                               31, 'N')
        self.assert_latlon_equal((np.array([0.0, 3.0, 6.0]),
                                  np.array([0.0, 1.0, 3.4])),
                                 result)
        # Scalar reference coordinates, converted one at a time.
        for latlon, utm, utm_kw in self.known_values:
            easting, northing = (np.array([v]) for v in utm[:2])
            self.assert_latlon_equal(latlon,
                                     UTM.to_latlon(easting, northing, *utm[2:]))
class BadInput(UTMTestCase):
def test_from_latlon_range_checks(self):
'''from_latlon should fail with out-of-bounds input'''
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-80.1), np.array(0))
for i in range(-8000, 8400):
UTM.from_latlon(np.array(i / 100.0), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(84.1), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-180.1))
for i in range(-18000, 18000):
UTM.from_latlon(np.array(0), np.array(i / 100.0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(180.1))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(300))
# test forcing zone ranges
# NYC should be zone 18T
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(40.71435), np.array(-74.00597), 70, 'T')
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(40.71435), np.array(-74.00597), 18, 'A')
def test_to_latlon_range_checks(self):
'''to_latlon should fail with out-of-bounds input'''
# test easting range
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(0), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(99999), np.array(5000000), 32, 'U')
for i in range(100000, 999999, 1000):
UTM.to_latlon(np.array(i), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(1000000), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(100000000000), np.array(5000000), 32, 'U')
# test northing range
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(-100000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(-1), 32, 'U')
for i in range(10, 10000000, 1000):
UTM.to_latlon(np.array(500000), np.array(i), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(10000001), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(50000000), 32, 'U')
# test zone numbers
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 0, 'U')
for i in range(1, 60):
UTM.to_latlon(np.array(500000), np.array(5000000), i, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 61, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), | np.array(5000000) | numpy.array |
import numpy as np
def make_cell_division_times(n_divisions, n_replicates=8, std=.1, seed=None, drop_p=.05, maxtime=10):
    """
    Draw simulated division times for an exponentially growing cell population.

    Starting from ``n_replicates`` cells, every division doubles the number
    of cells, so stage ``i`` holds ``n_replicates * 2**i`` cells.  Stage
    centres are log-spaced between 1 and ``maxtime``; every cell after the
    first stage may be dropped to mimic technical dropout.

    :param n_divisions: number of divisions the cells undergo.
    :param n_replicates: number of cells at the start.
    :param std: amount of noise (standard deviation) around stage centres.
    :param seed: RNG seed for reproducibility (drawn at random if falsy).
    :param drop_p: dropout probability for each cell after the first stage.
    :param maxtime: the end point of the simulated time axis.
    :returns: ``(t, labels, seed)`` — kept division times with shape (n, 1)
        (sorted ascending), per-cell stage labels (1, 2, 4, ...), and the
        seed actually used.
    """
    if not seed:
        seed = np.random.randint(1000, 10000)
    np.random.seed(seed)
    # Stage sizes double after each division: n_replicates * [1, 2, 4, ...].
    class_sizes = n_replicates * (2 ** np.arange(0, n_divisions + 1))
    n_data = class_sizes.sum()
    edges = (np.linspace(0, n_data, n_divisions + 2)).astype(int)
    class_centers = np.int64((edges[:-1] + edges[1:]) / 2)
    class_vars = np.ones(class_sizes.size) * std
    # Log-spaced stage centres sampled out of an exponential time grid.
    stage_mus = np.exp(np.linspace(0, np.log(maxtime), n_data)[class_centers])
    chunks = []
    for mu, st, size in zip(stage_mus, class_vars, class_sizes):
        stage = int(size / n_replicates)
        offset = 2 * st
        # One centre per sub-population of the stage, then n_replicates
        # individuals scattered uniformly around each centre (multimodal
        # spread, capturing biological variation within a stage).
        cell_centers = np.random.uniform((1 - .1) * mu, (1 + .1) * mu, stage)
        individuals = np.random.uniform(cell_centers - offset,
                                        cell_centers + offset,
                                        (n_replicates, stage)).flatten()
        chunks.append(individuals)
    t = np.concatenate(chunks)[:, None]
    t.sort(0)
    # Stage labels 1, 2, 4, ... repeated once per cell of each stage.
    labels = np.repeat(2 ** np.arange(n_divisions + 1), class_sizes)
    # Technical dropout: every cell after the first stage may fall out;
    # the first stage never drops.
    dropout_p = np.ones(n_data) * drop_p
    dropout_p[:n_replicates] = 0
    kept = np.random.binomial(1, 1. - dropout_p).astype(bool)
    return t[kept], labels[kept], seed
import GPy
from GPy.util import diag
from scipy.stats import norm
def simulate_latent_space(t, labels, seed=None, var=.2, split_prob=.1, gap=.75):
"""
Simulate splitting events in the latent space. The input time t is
a one dimensional array having the times in it. The labels is a int
array-like, which holds the labels for the wanted cell types.
Basically it is an array of repetitions of 1 to number of cell types,
e.g.: array([1..1,2..2,3..3,4..4]) for 4 cell types.
:param array_like t: the time as [nx1] array, where n is the number of cells.
:param array_like labels: the labels for the cells before splitting.
:param int seed: the seed for this splitting, for reproducability.
:param scalar var: the variance of spread of the first split, increasing after that.
:param [0,1] split_prop: probability of split in the beginning, halfs with each split.
:param [0,1] gap: the gap size between splitends and the beginning of the next.
The method returns Xsim, seed, labels, time::
- Xsim is the two dimensional latent space with splits included.
- seed is the seed generated, for reproduceability.
- labels are the corrected labels, for split events.
- time is the corrected timeline for split events.
"""
seed = seed or np.random.randint(1000,10000)
np.random.seed(seed)
n_data = t.shape[0]
newlabs = []
assert np.issubdtype(labels.dtype, np.int_) and np.greater(labels, 0).all(), "labels need to be of positive integer dtype, 0 is not allowed"
ulabs = []
for x in range(n_data):
if labels[x] not in ulabs:
ulabs.append(labels[x])
Xsim = np.zeros((n_data, 2))
split_ends = [Xsim[0]]
prev_ms = [[.1,.1]]
split_end_times = [t[labels==ulabs[0]].max()]
t = np.sort(t.copy(), 0)
tmax = t.max()
for lab in ulabs:
fil = (lab==labels).nonzero()[0]
# zero out, for simulating linear relation within cluster:
new_se = []
new_m = []
new_set = []
splits = np.array_split(fil, len(split_ends))
i = 1
for s in range(len(split_ends)):
# for all previously done splits:
prev_m = prev_ms[s]
split = splits[s]
split_end = split_ends[s]
split_end_time = split_end_times[s]
pre_theta = None
prev_split_time = None
for split in np.array_split(split, np.random.binomial(1, split_prob)+1):
newlabs.extend(["{} {}".format(_c, i) for _c in labels[split]])
i += 1
# If we split a collection into two, we want the two times to match up now:
if prev_split_time is None:
prev_split_time = t[split].ptp()
else:
t[split.min():] -= prev_split_time
t[split] -= (t[split.min()]-split_end_time)
# make splits longer, the farther in we are into
# the split process, it scales with sqrt(<split#>)
x = t[split].copy()
x -= x.min()
x /= x.max()
x *= np.sqrt(lab)
# rotate m away a little from the previous direction:
if pre_theta is None:
pre_theta = theta = np.random.uniform(-45, 45)
else:
theta = ((pre_theta+90)%90)-90
theta *= (np.pi/180.) # radians for rotation matrix
rot_m = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), | np.cos(theta) | numpy.cos |
import numpy as np
from ._base import GraphWorld, grid_to_adj
class CliffWalking(GraphWorld):
"""Cliff-walking task environment.
Parameters
----------
cliff : float
Value of falling off cliff.
Attributes
----------
states : array, shape = (n,)
Indices of states.
n_states : int
Total number of states.
viable_states : array
Indices of viable states.
n_viable_states : int
Number of viable states.
info : DataFrame
Pandas DataFrame storing the dynamics of the Markov decision process.
Rows correspond to each viable Q-value, whereas each column contains
its associated information.
References
----------
1. <NAME>., & <NAME>. (2018). Reinforcement learning: An introduction. MIT press.
2. <NAME>. (2003). Reinforcement learning under circumstances beyond its control.
"""
def __init__(self, cliff=-100):
## Define gridworld.
self.grid = np.arange(11 * 12, dtype=int).reshape(11,12)
self.shape = self.grid.shape
## Define start/terminal states.
start = 120
terminal = | np.array([121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131]) | numpy.array |
from abc import ABC, abstractmethod
from sigpipes import features
from sigpipes.sigcontainer import SigContainer, DPath
from sigpipes.sigfuture import SigFuture, SignalSpace
from sigpipes.auxtools import seq_wrap
from sigpipes.auxtools import TimeUnit
import gzip
from typing import Sequence, Union, Iterable, Optional, MutableMapping, Any, Mapping
import collections.abc
import sys
import fractions
from pathlib import Path
import numpy as np
import scipy.signal as sig
import scipy.fftpack as fft
from deprecated import deprecated
class SigOperator:
    """
    Base abstract class of signal operators.

    Operators are chained with the ``|`` (pipe) operator: a
    ``SigContainer`` piped into an operator yields the operator's result,
    while piping one operator into another composes them.
    """
    def apply(self, container: SigContainer) -> Any:
        """
        Apply this operator to one signal container.

        Abstract: subclasses implement the concrete transformation
        (and should call `prepare_container` first).
        """
        raise NotImplementedError("Abstract method")

    def prepare_container(self, container: SigContainer) -> SigContainer:
        """
        Prepare container at the beginning of apply method.
        (this method must be called at the first line of `apply` method)

        Args:
            container: prepared signal container
        """
        return container

    def __ror__(self, container: Union[SigContainer, Sequence[SigContainer], "SigOperator"]
                ) -> Any:
        """
        Pipe operator for streamlining of signal operators

        Args:
            container: left operand i.e signal container (input), sequence of containers
                (multiple inputs) or another signal operator (formation of compound operators).

        Returns:
            - for container as input: container, sequence of containers,
              or another data structures (only consumers)
            - for sequence of containers as input: sequence of containers,
              sequence of another data structures (only consumers)
            - for signal operators in both operands: compound signal operator
        """
        if isinstance(container, SigContainer):
            # Record this operation in the container's log, then apply it.
            container.d["log"].append(self.log())
            return self.apply(container)
        elif isinstance(container, collections.abc.Sequence):
            # Multiple inputs: apply this operator to each container.
            return [c | self for c in container]
        elif isinstance(container, SigOperator):
            # operator | operator: build a compound (composed) operator.
            return CompoundSigOperator(container, self)
        elif isinstance(container, SigFuture):
            # Deferred input: either the operator handles futures itself,
            # or its apply is wrapped into a new future node.
            if isinstance(self, ParallelSigOperator):
                return self.par_apply(container)
            else:
                return SigFuture(container, fn=self.apply,
                                 sigspace=self.sigspace_transformation(container.sigspace),
                                 node_description=self.log())
        else:
            raise TypeError("Unsupported left operand of pipe")

    def __or__(self, other):
        """Compose this operator with `other` (applied after this one)."""
        return CompoundSigOperator(self, other)

    def log(self):
        """
        Identification of operation for logging purposes.

        Returns:
            Simple (and if possible short) identification.
        """
        return self.__class__.__name__

    def sigspace_transformation(self, sigspace:SignalSpace) -> SignalSpace:
        """
        Describe how this operator transforms the signal space of a future.

        The default is the identity (signal space unchanged).
        """
        return sigspace
class ParallelSigOperator(ABC):
    """
    Interface for operators that can consume and produce futures directly.

    When the left operand of a pipe is a `SigFuture`, operators implementing
    this interface are dispatched to `par_apply` instead of being wrapped
    (see `SigOperator.__ror__`).
    """
    @abstractmethod
    def par_apply(self, future: SigFuture) -> SigFuture:
        """Apply the operator to a future-wrapped container."""
        pass
class Identity(SigOperator):
    """
    Base class of pass-through operators: the container flows through
    unchanged (apart from the preparation step).
    """
    def apply(self, container: SigContainer) -> SigContainer:
        return self.prepare_container(container)

    def log(self):
        # Pass-through operators are marked with a leading '#' in the log.
        return f"#{type(self).__name__}"
class MaybeConsumerOperator(Identity):
    """
    Abstract class for operators which can work as final consumers,
    i.e. they may produce a different representation of the signal data
    (e.g. dataframes, matplotlib figures, etc.) instead of a container.
    """
    pass
class CompoundSigOperator(SigOperator):
    """
    Composition of two operators: the left one is applied first and its
    result is piped into the right one.
    """
    def __init__(self, left_operator: SigOperator, right_operator: SigOperator) -> None:
        self.left = left_operator
        self.right = right_operator

    def apply(self, container: SigContainer):
        prepared = self.prepare_container(container)
        # Re-use the pipe dispatch so nested compounds behave uniformly.
        return prepared | self.left | self.right

    def log(self):
        return "#COMP"
class Print(Identity):
    """
    Operator which prints debug text representation into text output.

    Note: when `output` is a file name, the file is re-opened (and thus
    truncated) on every `apply`.
    """
    def __init__(self, output=">", header=True):
        """
        Args:
            output: file like object or name of output file (">" is stdout out, ">2" stderr)
            header: the header with log-id is printed
        """
        self.output = output
        self.header = header

    def apply(self, container: SigContainer) -> SigContainer:
        container = self.prepare_container(container)
        # Resolve the output stream lazily: file objects are not picklable,
        # so a named file is opened here rather than in __init__.
        opened = False
        if self.output == ">":
            f = sys.stdout
        elif self.output == ">2":
            f = sys.stderr
        elif isinstance(self.output, str):
            f = open(self.output, "wt")
            opened = True
        else:
            f = self.output
        try:
            if self.header:
                print(container.id, file=f)
                print("-"*40, file=f)
            print(str(container), file=f)
        finally:
            # Close only handles we opened ourselves (fixes a file-handle
            # leak); never close caller-supplied streams or stdout/stderr.
            if opened:
                f.close()
        return container
class SigModifierOperator(SigOperator):
    """
    Abstract class for operators which modify signal data.
    """
    def prepare_container(self, container: SigContainer) -> SigContainer:
        # Deep copy so the input container stays untouched; annotations are
        # shared (not copied) and derived metadata is dropped, since it
        # would no longer match the modified signal.
        return SigContainer(container.d.deepcopy(shared_folders=["annotations"],
                                                 empty_folders=["meta"]))
class Sample(SigModifierOperator):
    """
    Sample (continuous interval) of signal (for all channels)
    """
    def __init__(self, start: Union[int, float, np.timedelta64],
                 end: Union[int, float, np.timedelta64]):
        """
        Args:
            start: start point of sample. integer: sample number, float: time in seconds,
                np.timedelta64: time represented by standard time representation of numpy)
            end: end point of sample (see `start` for interpretation)
        """
        self.start = start
        self.end = end

    def apply(self, container: SigContainer) -> SigContainer:
        container = self.prepare_container(container)
        fs = container.d["signals/fs"]
        lag = container.lag
        # Convert both endpoints to sample indices; the unit is inferred
        # from the value's type (see __init__), and the container's lag
        # shifts the origin (see TimeUnit.to_sample).
        start = TimeUnit.to_sample(self.start, fs, TimeUnit.time_unit_mapper(self.start), lag)
        end = TimeUnit.to_sample(self.end, fs, TimeUnit.time_unit_mapper(self.end), lag)
        container.d["signals/data"] = container.d["signals/data"][:, start:end]
        # Cutting moves the origin, so the stored lag shifts accordingly.
        container.d["signals/lag"] = lag - start
        if "annotations" in container.d:
            # Re-base annotations to the cut window — presumably cut_annots
            # keeps only entries inside [start, end); confirm in SigContainer.
            adict = container.d["annotations"]
            newdict = SigContainer.cut_annots(adict, start, end)
            adict.update(newdict)
        return container

    def log(self):
        return f"SAMP@{str(self.start)}@{str(self.end)}"
class ChannelSelect(SigOperator):
    """Restrict the container to a subset of its channels."""
    def __init__(self, selector: Sequence[int]) -> None:
        """
        Args:
            selector: sequence of (integer) channel indexes to keep
        """
        self.selector = selector
    def prepare_container(self, container: SigContainer) -> SigContainer:
        # share annotations, start with an empty signals folder
        return SigContainer(container.d.deepcopy(shared_folders=["annotations"],
                                                 empty_folders=["signals"]))
    def apply(self, container: SigContainer) -> SigContainer:
        result = self.prepare_container(container)
        source = container.d
        result.d["signals/data"] = source["signals/data"][self.selector, :]
        result.d["signals/channels"] = np.array(source["signals/channels"])[self.selector].tolist()
        result.d["signals/units"] = np.array(source["signals/units"])[self.selector].tolist()
        result.d["signals/fs"] = source["signals/fs"]
        if "meta" in result.d:
            # metadata arrays are per-channel: restrict them as well
            result.d.map(lambda values: values[self.selector], root="meta")
        return result
    def log(self):
        return "CHSEL@" + ",".join(str(index) for index in self.selector)
class MetaProducerOperator(SigOperator):
    """
    Abstract class for operators which produce metadata (i.e. data inferred
    from signals).
    """
    def prepare_container(self, container: SigContainer) -> SigContainer:
        # BUG FIX: the shared folder was misspelled "annotation"; every other
        # operator in this module shares the "annotations" folder, so the
        # annotations were not actually shared here.
        return SigContainer(container.d.deepcopy(["signals", "annotations"]))
class FeatureExtractor(MetaProducerOperator):
    """Compute signal features and store them under "meta/features".

    The set of features is given either by `features_dict` or (by default)
    all features which do not need a threshold; threshold-based features are
    enabled through the corresponding keyword arguments.
    """
    def __init__(self, features_dict: Mapping[str, Union[bool,float,Sequence[float]]] = None,
                 *, wamp_threshold: Union[float, Sequence[float]] = (),
                 zc_diff_threshold: float = (), zc_mul_threshold = (),
                 sc_threshold: float = ()):
        """
        Args:
            features_dict: mapping feature name -> switch or threshold(s);
                when None, all non-threshold features are enabled
            wamp_threshold: threshold(s) for the WAMP feature
            zc_diff_threshold: difference threshold(s) for the ZC feature
            zc_mul_threshold: multiplicative threshold(s) for the ZC feature
            sc_threshold: threshold(s) for the SC feature
        """
        self.feature_dict = features_dict if features_dict is not None else {feature: True for feature
                                                                             in features.NON_THRESHOLD}
        if wamp_threshold:
            self.feature_dict["WAMP"] = wamp_threshold
        if zc_diff_threshold and zc_mul_threshold:
            # BUG FIX: a bare zip() is a single-use iterator; it would be
            # exhausted by the first apply() call and empty for all later
            # calls.  Materialize it into a list so it is reusable.
            self.feature_dict["ZC"] = list(zip(zc_diff_threshold, zc_mul_threshold))
        if sc_threshold:
            self.feature_dict["SC"] = sc_threshold
    def apply(self, container: SigContainer) -> SigContainer:
        container = self.prepare_container(container)
        data = container.d["signals/data"]
        # enabled feature names / thresholds of the threshold-based features
        fkeys = {key for key in self.feature_dict.keys() if self.feature_dict[key]}
        thresholds = {key: value for key, value in self.feature_dict.items()
                      if key in features.WITH_THRESHOLD}
        fdict = features.features(data, fkeys, thresholds)
        path = "meta/features"
        container.d.make_folder(path)
        container.d[path].update(fdict)
        return container
class FeatureExtraction(MetaProducerOperator):
"""
Extraction of basic features of signal.
"""
    def __init__(self, *, wamp_threshold: Union[float, Sequence[float]] = (),
                 zc_diff_threshold: float = 0.0, zc_mul_threshold = 0.0,
                 sc_threshold: float = 0.0):
        """
        Configure thresholds for the threshold-based features.

        Args:
            wamp_threshold: threshold value (or sequence of values) for WAMP feature
            zc_diff_threshold: difference threshold for the ZC feature
            zc_mul_threshold: multiplicative threshold for the ZC feature
            sc_threshold: threshold for the SC feature
        """
        self.wamp_threshold = seq_wrap(wamp_threshold)  # always stored as a sequence
        # subfolder of "meta" where apply() stores the results
        self.target = "features"
        self.zc_diff_threshold = zc_diff_threshold
        self.zc_mul_threshold = zc_mul_threshold
        self.sc_threshold = sc_threshold
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
n = container.sample_count
data = container.d["signals/data"]
absum = np.sum( | np.abs(data) | numpy.abs |
'''
-----------------------------------------------
File Name: data_seg
Description:
Author: Jing
Date: 6/29/2021
-----------------------------------------------
'''
from __future__ import division
import warnings
warnings.filterwarnings('ignore') # ignore warnings
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # only show error
import numpy as np
import pandas as pd
import cv2 as cv
from PIL import Image
import skimage.io as io
from skimage import img_as_ubyte
import os
from medpy.metric.binary import dc, hd, assd
from keras import backend as K
from keras.optimizers import Adam
#from tensorflow.keras.optimizers import Adam # only for doubleunet
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
import segmentation_models as sm
from model_seg import *
from doubleu_net import *
def load_data(img_path_aug, img_path_ori, gt_path_aug, gt_path_ori, csv_aug, csv_ori, H, W):
df_ori = pd.read_csv(csv_ori)
df_aug = pd.read_csv(csv_aug)
filename_list_ori = df_ori['filename'].values
filename_list_aug = df_aug['filename'].values
pixel_size_ori = df_ori['pixel size(mm)'].values
hcpx_ori = df_ori['head circumference(px)'].values
img_ori = []
label_ori = []
img_aug = []
label_aug = []
pixel_ori = []
label_hc = []
for (i, f) in enumerate(filename_list_ori):
img = Image.open(img_path_ori + f).convert('RGB') # 3 channels
img = img.resize((H,W))
img = np.array(img)
img_norm = (img - np.mean(img)) / np.std(img) # normalize
img_ori.append(img_norm)
pixel_ori.append(pixel_size_ori[i])
label_hc.append(hcpx_ori[i])
gt = Image.open(gt_path_ori + f).convert('L')
gt = gt.resize((H,W))
gt = np.array(gt)
gt[gt > 0.5] = 1 # normalize
gt[gt <= 0.5] = 0
gt = gt[:, :, np.newaxis]
label_ori.append(gt)
for (i, f) in enumerate(filename_list_aug):
img = Image.open(img_path_aug + f).convert('RGB')
img = img.resize((H,W))
img = np.array(img)
img_norm = (img - np.mean(img)) / np.std(img) # normalize
# img = img_norm[:, :, np.newaxis]
img_aug.append(img_norm)
gt = Image.open(gt_path_aug + f).convert('L')
gt = gt.resize((H,W))
gt = np.array(gt)
gt[gt > 0.5] = 1 # normalize
gt[gt <= 0.5] = 0
gt = gt[:, :, np.newaxis]
label_aug.append(gt)
print("load data successfully!")
return np.asarray(img_aug, dtype=np.float64), np.asarray(label_aug), | np.asarray(label_hc) | numpy.asarray |
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd and McGill University. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the MIT License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
MIT License for more details.
"""
import numpy as np
from src.utils import reparameterized_to_beta, reparameterized_to_pi, graph_preparation, metric_perp_avg, \
bernuli_distrbution, step_size_function, accuracy_avg, initialize_theta_phi_with_better_initialization
class MMSBM_SGMCMC:
    def __init__(self, flags, n, k, edges, nonedges, beta_prior, membership_prior, theta_constant,
                 phi_constant, true_labels, better_initialization_flag, step_size_scalar,
                 node_neighbors_dict, val_set_index,
                 mu=1, max_iter=10000):
        """ follows the notations in the original paper
        :param flags: hyper-parameters for GCN and MMSBM
        :param n: node number
        :param k: class number
        :param edges: edge indices
        :param nonedges: non-edge indices
        :param beta_prior: prior for the community strength
        :param membership_prior: prior for the membership
        :param theta_constant: re-parameterization constant for community strength beta
        :param phi_constant: re-parameterization constant for membership
        :param true_labels: ground truth labels
        :param better_initialization_flag: a flag indicating if we train the MMSBM from scratch or use the better initialization output from GCN
        :param step_size_scalar: step size for the MMSBM
        :param node_neighbors_dict: a dict for querying the neighborhood node indices
        :param val_set_index: indices for the validation set
        :param mu: shape parameter of the Gamma prior used to draw theta
        :param max_iter: maximum number of training iterations
        """
        self.gamma_scale = flags.gamma_scale
        self.better_initialization_flag = better_initialization_flag
        self.step_size_scalar = step_size_scalar
        self.flags = flags
        self.n = n  # number of nodes
        self.k = k  # number of communities/classes
        self.val_set_index = val_set_index
        self.alpha = 1.0 / k  # symmetric Dirichlet prior for memberships
        self.mu = mu
        self.tao = 1024  # step-size schedule offset
        self.n_list_set = np.array([i for i in range(self.n)])
        self.max_iter = max_iter
        self.mini_batch_nodes = flags.batch_size
        self.true_labels = true_labels
        self.sample_n = 20  # sample size for updating each local parameter
        self.T = 1  # number of pi/beta samples per edge during evaluation
        self.test_edges_n = 500  # test set edges for the perplexity test
        self.delta = flags.delta
        self.node_neighbors_dict = node_neighbors_dict
        self.avg_predict_label = 0
        # variable initialization: random draw from the Gamma priors, or reuse
        # the GCN-provided initialization
        if not self.better_initialization_flag:
            self.phi = np.random.gamma(self.alpha, 1, size=(self.n, self.k))
            self.theta = np.random.gamma(self.mu, 1, size=(self.k, 2))
            self.beta, self.theta_constant = reparameterized_to_beta(self.theta)
            self.pi, self.phi_constant = reparameterized_to_pi(self.phi, self.n)
            self.initial_prediction_labels = self.pi.argmax(axis=1)
        else:
            self.theta_constant = theta_constant
            self.phi_constant = phi_constant
            self.beta = beta_prior
            self.pi = membership_prior
            self.initial_prediction_labels = membership_prior.argmax(axis=1)
            # recover theta/phi consistent with the given beta/pi
            self.theta, self.phi = initialize_theta_phi_with_better_initialization(self.beta, self.pi,
                                                                                   self.theta_constant,
                                                                                   self.phi_constant,
                                                                                   k)
        self.MCMC_MMSBM_prediction_labels = self.initial_prediction_labels
        self.B = np.ones((self.k, self.k)) * flags.delta
        # Info of the given topology, split into the edges and non-edges
        self.edges = edges
        self.nonedges = nonedges
        self.edges_n, self.nonedges_n, self.test_set, self.y_test_set = graph_preparation(self.edges, self.nonedges,
                                                                                          test_edges_n=self.test_edges_n)
        # fraction of non-edges subsampled in each theta update
        self.sampled_non_edges_ratio = self.flags.sampled_non_edges_ratio
        self.sampled_non_edges_n = int(self.sampled_non_edges_ratio * self.nonedges_n)
        self.dir = 'figures/'
def Z_constant_mini_batch_phi(self, node_a_membership, node_b_membership, link_index_mini_batch):
bernuli_delta_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.delta)
bernuli_delta_mini_batch[link_index_mini_batch] = self.delta
Z_constant_mini_batch = bernuli_delta_mini_batch.copy()
for k in range(self.k):
bernuli_beta_k_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.beta[k])
bernuli_beta_k_mini_batch[link_index_mini_batch] = self.beta[k]
pi_a_k = (node_a_membership[:, k]).reshape(len(node_a_membership), 1)
pi_b_k = (node_b_membership[:, k]).reshape(len(node_b_membership), 1)
Z_constant_mini_batch += (bernuli_beta_k_mini_batch - bernuli_delta_mini_batch) * pi_a_k * pi_b_k
return Z_constant_mini_batch
def Z_constant_mini_batch(self, node_a_membership, node_b_membership, links_flag):
if links_flag:
bernuli_delta_mini_batch = np.ones((len(node_a_membership), 1)) * self.delta
else:
bernuli_delta_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.delta)
Z_constant_mini_batch = bernuli_delta_mini_batch.copy()
for k in range(self.k):
if links_flag:
bernuli_beta_k_mini_batch = np.ones((len(node_a_membership), 1)) * self.beta[k]
else:
bernuli_beta_k_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.beta[k])
pi_a_k = (node_a_membership[:, k]).reshape(len(node_a_membership), 1)
pi_b_k = (node_b_membership[:, k]).reshape(len(node_b_membership), 1)
Z_constant_mini_batch += (bernuli_beta_k_mini_batch - bernuli_delta_mini_batch) * pi_a_k * pi_b_k
return Z_constant_mini_batch
def function_f_ab_k_k(self, node_a_membership, node_b_membership, observation_ab, k):
f_ab_k_k = bernuli_distrbution(observation_ab, self.beta[k]) * node_a_membership[k] * node_b_membership[k]
return f_ab_k_k
def function_f_ab_k_mini_batch_pi(self, k, node_a_membership, node_b_membership, links_index):
bernuli_delta_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.delta)
bernuli_delta_mini_batch[links_index] = self.delta
bernuli_beta_k_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.beta[k])
bernuli_beta_k_mini_batch[links_index] = self.beta[k]
node_a_k = (node_a_membership[:, k]).reshape(len(node_a_membership), 1)
node_b_k = (node_b_membership[:, k]).reshape(len(node_b_membership), 1)
f_ab_k_mini_batch = node_a_k * (
bernuli_beta_k_mini_batch * node_b_k + bernuli_delta_mini_batch * (1 - node_b_k))
return f_ab_k_mini_batch
def function_f_ab_k_k_mini_batch(self, k, node_a_membership, node_b_membership, link_flag):
if link_flag:
f_ab_k_k_mini_batch = np.ones((len(node_a_membership), 1)) * self.beta[k]
else:
f_ab_k_k_mini_batch = np.ones((len(node_a_membership), 1)) * (1 - self.beta[k])
f_ab_k_k_mini_batch = f_ab_k_k_mini_batch * (node_a_membership[:, k]).reshape(len(node_b_membership), 1) * (
node_b_membership[:, k]).reshape(len(node_b_membership), 1)
return f_ab_k_k_mini_batch
    def update_phi(self, batch_nodes_index, step_size, n_list_set):
        """One stochastic-gradient Langevin step for the membership parameters phi.

        For every node in the mini-batch, all of its links plus a random
        sample of its non-links are used; the non-link contribution is
        re-weighted by a correction factor to stay unbiased.
        Returns the updated (non-negative) phi rows for ``batch_nodes_index``.
        """
        n = self.mini_batch_nodes
        grad_phi = np.zeros((n, self.k))
        # flattened pair arrays: n pairs for each of the n mini-batch nodes
        node_a = np.zeros(n * n).astype(int)
        node_b = np.zeros(n * n).astype(int)
        y_mini_batch = np.zeros(n * n).astype(int)  # 1 = link, 0 = non-link
        corrections = np.zeros(n * n)
        for i, node in enumerate(batch_nodes_index):
            # deal with links: all neighbors of the node are included
            node_neighbors = self.node_neighbors_dict[node]
            links_n = len(node_neighbors)
            node_a[i * n:i * n + n] = node
            node_b[i * n:i * n + links_n] = node_neighbors
            y_mini_batch[i * n:i * n + links_n] = 1
            # deal with non-links: sample the remaining n - links_n partners
            non_neighbors = np.setdiff1d(n_list_set, node_neighbors)
            non_neighbors = np.setdiff1d(non_neighbors, np.array([node]))
            np.random.shuffle(non_neighbors)
            sampled_node_neighbors = non_neighbors[:n - links_n]
            node_b[(i * n + links_n):(i * n + n)] = sampled_node_neighbors
            # unbiasedness correction: links are exact, non-links subsampled
            corrections[i * n: i * n + links_n] = 1
            corrections[(i * n + links_n):(i * n + n)] = float(self.n - links_n - 1) / (self.mini_batch_nodes - links_n)
        corrections = corrections.reshape(n, n)
        pi_a = self.pi[node_a]
        pi_b = self.pi[node_b]
        phi_a = self.phi[node_a]
        links_index = np.where(y_mini_batch == 1)[0]
        Z_ab_mini_batch = self.Z_constant_mini_batch_phi(pi_a, pi_b, links_index)
        for k in range(self.k):
            f_ab_k_mini_batch = self.function_f_ab_k_mini_batch_pi(k, pi_a, pi_b, links_index)
            phi_a_k = (phi_a[:, k]).reshape(n * n, 1)
            temp_denumerator = Z_ab_mini_batch * phi_a_k
            denumerator = temp_denumerator.copy()
            # guard against division by zero with a tiny floor
            index_zero = np.where(temp_denumerator == 0)[0]
            denumerator[index_zero] = 10 ** (-25)
            temp = f_ab_k_mini_batch / denumerator - np.ones((n * n, 1)) / ((np.sum(phi_a,
                                                                                    axis=1)).reshape(
                n * n, 1))
            temp = temp.reshape(n, n) * corrections
            grad_phi[:, k] = np.sum(temp, axis=1)
        # preconditioned Langevin-style update; np.abs keeps phi non-negative
        temp_phi = np.abs(self.phi[batch_nodes_index] + step_size / 2 * (
                - self.phi[batch_nodes_index] * self.gamma_scale + grad_phi * self.phi[batch_nodes_index]))
        return temp_phi
    def update_theta(self, step_size):
        """One stochastic-gradient step for the re-parameterized community strengths theta.

        All links are used exactly; non-links are subsampled and re-weighted
        by ``correction`` to keep the gradient unbiased.  Returns the updated
        (non-negative) theta matrix of shape (k, 2).
        """
        grad_theta = np.zeros((self.k, 2))
        # subsample non-edges (with replacement) for this update
        sample_non_edges_index = np.random.randint(self.nonedges_n, size=self.sampled_non_edges_n)
        non_edges_index_a = self.nonedges[0][sample_non_edges_index]
        non_edges_index_b = self.nonedges[1][sample_non_edges_index]
        pi_a_non_links = self.pi[non_edges_index_a]
        pi_b_non_links = self.pi[non_edges_index_b]
        z_ab_non_links = self.Z_constant_mini_batch(pi_a_non_links, pi_b_non_links, links_flag=False)
        edges_index_a = self.edges[0]
        edges_index_b = self.edges[1]
        pi_a_links = self.pi[edges_index_a]
        pi_b_links = self.pi[edges_index_b]
        z_ab_links = self.Z_constant_mini_batch(pi_a_links, pi_b_links, links_flag=True)
        # re-weight the subsampled non-link contribution
        correction = float(self.nonedges_n) / len(non_edges_index_a)
        for k in range(self.k):
            f_ab_kk_mini_batch_links = self.function_f_ab_k_k_mini_batch(k, pi_a_links, pi_b_links, link_flag=True)
            links_term = f_ab_kk_mini_batch_links / z_ab_links
            f_ab_kk_mini_batch_non_links = self.function_f_ab_k_k_mini_batch(k, pi_a_non_links, pi_b_non_links,
                                                                             link_flag=False)
            non_links_term = (f_ab_kk_mini_batch_non_links / z_ab_non_links) * correction
            theta_k = (np.sum(self.theta, axis=1))[k]
            # numerical floor: avoid division by (near) zero below
            if self.theta[k][0] < 10 ** (-50):
                self.theta[k][0] = 10 ** (-50)
            if self.theta[k][1] < 10 ** (-50):
                self.theta[k][1] = 10 ** (-50)
            grad_theta[k][0] = np.sum(links_term * (-1.0 / theta_k)) \
                               + np.sum(non_links_term * (1.0 / self.theta[k][0] - 1.0 / theta_k))
            grad_theta[k][1] = np.sum(links_term * (1.0 / self.theta[k][1] - 1.0 / theta_k)) \
                               + np.sum(non_links_term * (-1.0 / theta_k))
        # preconditioned update; np.abs keeps theta non-negative
        temp_theta = np.abs(
            self.theta + (step_size / 2) * (-self.theta * self.gamma_scale + grad_theta * self.theta))
        return temp_theta
def train_one_epoch(self, step_size, n_list_set):
batch_nodes_index = | np.random.choice(self.n, size=self.mini_batch_nodes, replace=False) | numpy.random.choice |
import copy
import functools
import gc
from hfutils.constants import TASK_TO_LABELS
from seaborn.distributions import histplot
import torch
import logging
import numpy as np
from transformers.data.data_collator import (
DataCollatorForSeq2Seq,
default_data_collator,
)
from transformers import T5ForConditionalGeneration, T5Tokenizer
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
from scipy.optimize import minimize
from sklearn.mixture import GaussianMixture
import os
import sys
from torch.nn.modules.activation import Threshold
from datasets import Dataset, concatenate_datasets
from datasets import load_dataset, load_metric
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel, T5ForConditionalGeneration
from torch.utils.data import (
DataLoader,
RandomSampler,
SequentialSampler,
TensorDataset,
dataloader,
)
from sklearn.model_selection import train_test_split
from hfutils.logger import Logger
from hfutils.constants import token2label
from hfutils.arg_parser import HfArguments
from hfutils.loader import ModelLoader, DatasetLoader
from hfutils.temperature_scaling import ModelWithTemperature
from hfutils.monte_carlo import monte_carlo_bounds
from hfutils.calibration import agg_logits, g_scaling_helper, temperature_scaling_helper, temperature_scale, temperature_scaling
home_dir = os.path.expanduser(("~"))
base_dir = os.path.join(home_dir, os.path.join("model-finetune", "outputs", "google"))
task_name = "mnli"  # HACK: task with the largest label set, so all label tokens are covered
# shared tokenizer for all ensemble members (same T5 vocabulary)
tokenizer = T5Tokenizer.from_pretrained(f"{home_dir}/HuggingFace/google/t5-small-lm-adapt", use_fast=False)
# first vocabulary token of each (non-None) label word of the task
label_tokens = [
    tokenizer(label, max_length=2, truncation=True).input_ids[0]
    for label in TASK_TO_LABELS[task_name]
    if label is not None
]
# ensemble members from smallest to largest
model_keys = [
    "S",
    "M",
    "L",
    "XL",
]
device_map = [
    "cuda:0",
    "cuda:0",
    "cuda:0",
    "cuda:1",
]
# relative energy cost of one forward pass per model (XL = 1.0)
energy_discount_factor = [
    1 / 40,
    3 / 40,
    10 / 40,
    40 / 40,
]
model_paths = [
    f"{base_dir}/t5-small-lm-adapt/all/checkpoint-4500",
    f"{base_dir}/t5-base-lm-adapt/all/checkpoint-2000",
    f"{base_dir}/t5-large-lm-adapt/all/checkpoint-1500",
    f"{base_dir}/t5-xl-lm-adapt/all/checkpoint-1500",
]
model_energy = dict(zip(model_keys, energy_discount_factor))
model_paths = dict(zip(model_keys, model_paths))
model_device = dict(zip(model_keys, device_map))
logger = Logger(__file__, "info", 5000000, 5)
# load every checkpoint onto its assigned device in eval mode
models = dict()
for key in model_paths:
    logger.debug("key %s, path %s, device %s", key, model_paths[key], model_device[key])
    models[key] = T5ForConditionalGeneration.from_pretrained(model_paths[key])
    models[key] = models[key].to(model_device[key])
    models[key].eval()
torch.cuda.empty_cache()
gc.collect()
logger.info("model loaded")
# ------------- Dataset Prepare --------------
from hfutils.loader import t5_preprocess_function, load_glue_val
from functools import partial
# tokenize/pad every example to a fixed length of 128
preprocess_function = partial(
    t5_preprocess_function,
    tokenizer=tokenizer,
    padding="max_length",
    max_length=128,
)
eval_dataset = load_glue_val(preprocess_function).shuffle()
data_collator = DataCollatorForSeq2Seq(tokenizer)
# 40% of the GLUE validation data is used to fit temperatures/thresholds,
# the remaining 60% is held out for testing
split_dataset = eval_dataset.train_test_split(train_size=0.4)
train, test = split_dataset["train"], split_dataset["test"]
print(train, test)
train_len = len(train)
test_len = len(test)
train_dataloader = DataLoader(
    train,
    collate_fn=data_collator,
    batch_size=16,
)
test_dataloader = DataLoader(
    test,
    collate_fn=data_collator,
    batch_size=16,
)
m = torch.nn.Softmax(dim=1)
logger.info("data loaded")
# ------------- Train Temperature --------------
print("temperature loaded")
n_models = len(model_keys)
num_labels = 0
def model_inference(model, batch, temperature=None, device="cuda:0"):
    """Run greedy generation on one batch and return the first-step logits
    restricted to the task label tokens, optionally temperature-scaled."""
    ids = batch["input_ids"].to(device)
    mask = batch["attention_mask"].to(device)
    generated = model.generate(
        input_ids=ids,
        attention_mask=mask,
        do_sample=False,  # greedy decoding: deterministic w.r.t. batching
        return_dict_in_generate=True,
        output_scores=True,
    )
    label_logits = generated.scores[0][:, label_tokens]
    return label_logits if temperature is None else temperature(label_logits)
# ============= COLLECT TRAIN LOGITS =================
# Ground-truth class indexes for the whole training split (first target token
# of each example mapped back to a label index).
labels_list = []
for batch in tqdm(train_dataloader, desc="Collect Labels"):
    label = token2label(batch["labels"][:, 0], label_tokens)
    labels_list += label
labels = torch.as_tensor(labels_list, dtype=torch.int64)
# First-step label logits of every ensemble member on the training split,
# key -> tensor of shape (train_len, num_label_tokens).
model_outputs = {}
for key in model_keys:
    all_logits = []
    for batch in tqdm(train_dataloader, desc=f"Collect Train Data {key}"):
        logits = model_inference(models[key], batch, device=model_device[key])
        all_logits.append(logits)
    all_logits = torch.cat(all_logits)
    model_outputs[key] = all_logits
# ============= TRAIN TEMPERATURE =============
# one scaling layer per model, each trained for 500 epochs
epoches = [
    500,
    500,
    500,
    500
]
model_epoches = dict(zip(model_keys, epoches))
model_temperature = g_scaling_helper(model_outputs, labels, model_epoches, len(label_tokens))
print("temperature", model_temperature)
# apply the learned scaling to the cached logits and persist the layers
for key in model_keys:
    model_outputs[key] = model_temperature[key](model_outputs[key])
    torch.save(model_temperature[key].state_dict(), os.path.join("tests", "confidence", f"t5_glue_glayer-{key}"))
# ============= TRAIN HYPERPARAMETER =============
num_models = len(model_keys)
m = torch.nn.Softmax(dim=1)
def total_reward(threshold):
reward = 0
energy = 0
mask = np.array([False] * train_len)
alpha = threshold[-1]
threshold = threshold[:-1]
hist_logits = None
for i, key in enumerate(model_keys):
hist_logits = agg_logits(
hist_logits if key != model_keys[-1] else None,
model_outputs[key],
alpha
)
probs, _ = torch.max(m(hist_logits), dim=1)
probs = probs.detach().cpu().numpy()
# probs = hist_probs[i]
processed = (
(probs >= threshold[i])
if key in model_keys[:-1]
else np.array([True] * train_len)
)
# print(mask, processed)
processed_probs = probs[(~mask) & processed]
reward += np.around( | np.sum(processed_probs) | numpy.sum |
import numpy as np
# Optimization direction of every supported metric: "min" for errors/losses
# (lower is better), "max" for scores (higher is better).
metric_optimum = {
    "MAE": "min",
    "MSE": "min",
    "accuracy": "max",
    "sensitivity": "max",
    "specificity": "max",
    "PPV": "max",
    "NPV": "max",
    "BA": "max",
    "loss": "min",
}
class MetricModule:
def __init__(self, metrics, n_classes=2):
self.n_classes = n_classes
# Check if wanted metrics are implemented
list_fn = [
method_name
for method_name in dir(MetricModule)
if callable(getattr(MetricModule, method_name))
]
self.metrics = dict()
for metric in metrics:
if f"{metric.lower()}_fn" in list_fn:
self.metrics[metric] = getattr(MetricModule, f"{metric.lower()}_fn")
else:
raise ValueError(
f"The metric {metric} is not implemented in the module"
)
def apply(self, y, y_pred):
"""
This is a function to calculate the different metrics based on the list of true label and predicted label
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(Dict[str:float]) metrics results
"""
if y is not None and y_pred is not None:
results = dict()
y = np.array(y)
y_pred = np.array(y_pred)
for metric_key, metric_fn in self.metrics.items():
metric_args = list(metric_fn.__code__.co_varnames)
if "class_number" in metric_args:
for class_number in range(self.n_classes):
results[f"{metric_key}-{class_number}"] = metric_fn(
y, y_pred, class_number
)
else:
results[metric_key] = metric_fn(y, y_pred)
else:
results = dict()
return results
@staticmethod
def mae_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(float) mean absolute error
"""
return np.mean(np.abs(y - y_pred))
@staticmethod
def mse_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(float) mean squared error
"""
return np.mean(np.square(y - y_pred))
@staticmethod
def accuracy_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(float) accuracy
"""
true = np.sum(y_pred == y)
return true / len(y)
@staticmethod
def sensitivity_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) sensitivity
"""
true_positive = np.sum((y_pred == class_number) & (y == class_number))
false_negative = np.sum((y_pred != class_number) & (y == class_number))
if (true_positive + false_negative) != 0:
return true_positive / (true_positive + false_negative)
else:
return 0.0
@staticmethod
def specificity_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) specificity
"""
true_negative = np.sum((y_pred != class_number) & (y != class_number))
false_positive = np.sum((y_pred == class_number) & (y != class_number))
if (false_positive + true_negative) != 0:
return true_negative / (false_positive + true_negative)
else:
return 0.0
@staticmethod
def ppv_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) positive predictive value
"""
true_positive = np.sum((y_pred == class_number) & (y == class_number))
false_positive = np.sum((y_pred == class_number) & (y != class_number))
if (true_positive + false_positive) != 0:
return true_positive / (true_positive + false_positive)
else:
return 0.0
@staticmethod
def npv_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) negative predictive value
"""
true_negative = np.sum((y_pred != class_number) & (y != class_number))
false_negative = np.sum((y_pred != class_number) & (y == class_number))
if (true_negative + false_negative) != 0:
return true_negative / (true_negative + false_negative)
else:
return 0.0
@staticmethod
def ba_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) balanced accuracy
"""
return (
MetricModule.sensitivity_fn(y, y_pred, class_number)
+ MetricModule.specificity_fn(y, y_pred, class_number)
) / 2
@staticmethod
def confusion_matrix_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(Dict[str:float]) confusion matrix
"""
true_positive = np.sum((y_pred == 1) & (y == 1))
true_negative = np.sum((y_pred == 0) & (y == 0))
false_positive = | np.sum((y_pred == 1) & (y == 0)) | numpy.sum |
#Version
#--------
#Companion Code Version: 1.0
#
#
#Citation
#---------
#Any part of this code used in your work should be cited as follows:
#
#<NAME>, <NAME> and <NAME>, "Distributed learning of human mobility patterns from cellular network data,"
#in Proc. 51st Annu. Conf. on Information Sciences and Systems (CISS), 2017, Companion Code, ver. 1.0.
#--------------------------------------------------------------------------
#
#CODE OUTLINE:
#We first generate synthetic data and then distribute data across sites in such a way that each site contains a subset
#of the global dictionary. Finally, representation errors for cloud NN-K-SVD, localized NN-K-SVD and centralized NN-K-SVD are computed.
import time
import random
import numpy as np
from generategraph import gengraph
from sparsecoding import sparse_encode_nnmp
from centralizeddictionarylearning import dict_learning_nnksvd
from distributeddictionarylearning import cloud_nnksvd
def generate_dict(n_features, n_atom):
    """Create a random non-negative dictionary whose columns have unit L2 norm."""
    atoms = np.abs(np.random.randn(n_features, n_atom))
    column_norms = np.sqrt((atoms ** 2).sum(axis=0))
    return atoms / column_norms
NodeN = 10  # number of sites/nodes in the network
p = 0.5  # edge probability of the random communication graph
networkg = gengraph(NodeN, p)
""" Generate dictionary and data """
random.seed(time.time())
n_features = 20
TotalAtoms = 50  # size of the global ground-truth dictionary
print("generating dictionary...")
D_gt = generate_dict(n_features, TotalAtoms)
LocalAtomN = 40  # each site observes a random subset of this many atoms
sp = 3  # sparsity: atoms used per training sample
LocalTrainSamples = 150
TestSamples = 500
# per-site training data: each site mixes a random subset of global atoms
Y = [[] for i in range(NodeN)]
for i in range(NodeN):
    indexes = np.random.permutation(range(TotalAtoms))[:LocalAtomN]
    tempD = D_gt[:,indexes]
    tempCoef = np.zeros([LocalAtomN, LocalTrainSamples])
    for j in range(LocalTrainSamples):
        atomidx = np.random.permutation(range(LocalAtomN))[:sp]
        tempCoef[atomidx,j] = np.abs(np.random.randn(sp))
    tempY = np.dot(tempD, tempCoef)
    # normalize every sample to unit L2 norm
    tempY /= np.tile( np.sqrt((tempY ** 2).sum(axis=0)), (n_features, 1) )
    Y[i] = tempY
# concatenation of all local data (used by the centralized baseline)
Y_central = np.zeros([n_features, LocalTrainSamples*NodeN])
for i in range(NodeN):
    Y_central[:,np.arange(LocalTrainSamples*i,LocalTrainSamples*(i+1))] = Y[i]
# test set drawn from the full global dictionary
TestCoef = np.zeros([TotalAtoms, TestSamples])
for i in range(TestSamples):
    atomidx = np.random.permutation(range(TotalAtoms))[:sp]
    TestCoef[atomidx,i] = np.abs(np.random.randn(sp))
Ytest = np.dot(D_gt, TestCoef)
Ytest /= np.tile( np.sqrt((Ytest ** 2).sum(axis=0)), (n_features, 1) )
""" set parameters """
max_iter = 30
powerIterations = 5
consensusIterations = 10
D_init = generate_dict(n_features, TotalAtoms)
d_init = np.abs(np.random.randn(1,n_features))
d_init /= np.linalg.norm(d_init)
coefupdateiter = 20
""" distributed dictionary learning """
Dksvd = cloud_nnksvd(Y, TotalAtoms, D_init, sp, NodeN, networkg, d_init, max_iter, coefupdateiter, powerIterations, consensusIterations)
# mean representation error of every site's dictionary at every iteration
TestDistriErrorMat = np.zeros([max_iter+1, NodeN])
for i in range(max_iter+1):
    print(i)
    for j in range(NodeN):
        temptheta2 = sparse_encode_nnmp(Ytest, Dksvd[:,:,i,j], sp, coefupdateiter)
        TestDistriErrorMat[i,j] = np.mean( ((Ytest - np.dot(Dksvd[:,:,i,j], temptheta2)) ** 2).sum(axis=0) )
D_local = | np.zeros([n_features, TotalAtoms, max_iter+1, NodeN]) | numpy.zeros |
"""
================================================
My own Gaussion Mixture Model for SV genotyping.
Learn form scikit-learn
================================================
Author : <NAME>
Date : 2014-01-06 14:33:45
"""
import sys
import numpy as np
from scipy import linalg
from sklearn import cluster
from sklearn.base import BaseEstimator
from sklearn.utils.extmath import logsumexp
EPS = np.finfo(float).eps
class GMM ( BaseEstimator ) :
    """
    Gaussian mixture model specialized for SV (structural variant) genotyping,
    adapted from scikit-learn's legacy ``sklearn.mixture.GMM``.

    Components are anchored near fixed means (0.001, 0.5, 1.0) that correspond
    to the genotypes '1/1', '0/1' and '0/0' respectively; ``fit`` searches over
    subsets of these anchors and keeps the fit whose learned means drift least
    from them.

    NOTE(review): this module uses Python 2 print statements and the removed
    ``sklearn.utils.extmath.logsumexp`` / ``np.float`` names — it targets an
    old Python 2 / scikit-learn environment.
    """
    def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=1e-2, min_covar=1e-3,
                 n_iter=100, n_init=10, params='wmc', init_params='wmc'):
        # n_components: number of mixture components (1-3 supported downstream).
        # covariance_type: covariance parameterization; only 'full' has a
        #   density implementation in this module (see score_samples).
        # thresh: EM convergence threshold on the change in log-likelihood.
        # min_covar: ridge added to covariances on Cholesky failure.
        # n_iter: max EM iterations per restart; n_init: number of restarts.
        # params / init_params: which of weights ('w'), means ('m') and
        #   covariances ('c') are updated / re-initialized.
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.thresh = thresh
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.init_means = []
        self.init_covars = []
        self.category = [] # For genotype
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError( 'Invalid value for covariance_type: %s' % covariance_type )
        if n_init < 1: raise ValueError('GMM estimation requires at least one run')
        # Uniform component weights until fit() is called.
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)

    def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.
        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = np.asarray(X)
        if X.ndim == 1:
            # Treat a flat vector as single-feature samples.
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X is not compatible with self')
        # Joint log-density of each sample under each weighted component.
        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,self.covariance_type)
               + np.log(self.weights_))
        # Marginal log-likelihood per sample; responsibilities are the
        # normalized posteriors exp(lpr - logprob).
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities

    def predict(self, X):
        """
        Predict label for data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = (n_samples,)
        """
        logprob, responsibilities = self.score_samples(X)
        # Hard assignment: most responsible component per sample.
        return responsibilities.argmax(axis=1)

    def predict_proba(self, X):
        """
        Predict posterior probability of data under each Gaussian
        in the model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities

    def fit(self, X):
        """
        Copy form scikit-learn: gmm.py
        Estimate model parameters with the expectation-maximization
        algorithm.
        A initialization step is performed before entering the em
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating the
        GMM object. Likewise, if you would like just to do an
        initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        """
        X = np.asarray(X, dtype=np.float)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))
        lowest_bias = np.infty
        # Genotype labels and their anchor means / variances:
        # '1/1' ~ 0.001, '0/1' ~ 0.5, '0/0' ~ 1.0.
        c1,c2,c3 = '1/1', '0/1', '0/0'
        m1,m2,m3 = 0.001 , 0.5 , 1.0
        v1,v2,v3 = 0.002, 0.002, 0.002
        # Every subset of the three genotypes that could explain the data:
        # index 0 is the full triple, 1-3 are pairs, 4-6 are singletons.
        category = np.array([ [c1,c2,c3],
                              [c1,c2], [c1,c3], [c2,c3] ,
                              [c1] , [c2] , [c3] ])
        init_means = np.array([ [[ m1],[ m2] , [ m3]],
                                [[ m1],[ m2]], [[m1],[m3]], [[m2],[m3]],
                                [[m1]] , [[m2]] , [[m3]] ])
        init_covars = np.array([ [[[ v1]],[[ v2]],[[ v3]]],
                                 [[[ v1]],[[ v2]]], [[[ v1]],[[ v3]]], [[[ v2]],[[ v3]]],
                                 [[[ v1]]] , [[[ v2]]] , [[[ v3]]] ])
        bestCovars, bestMeans, bestWeights, bestConverged, bestCategory = [], [], [], [], []
        for i, (m,v,c) in enumerate( zip(init_means, init_covars, category) ) :
            # Only consider anchor subsets whose size matches n_components.
            if i == 0 and self.n_components != 3 : continue
            if i < 4 and self.n_components == 1 : continue
            self.init_means = np.array(m)
            self.init_covars = np.array(v)
            self.category = np.array(c)
            best_params,bias = self.training(X)
            # Keep the candidate whose learned means drift least from the
            # genotype anchors.
            if lowest_bias > bias :
                lowest_bias = bias
                bestCovars = best_params['covars']
                bestMeans = best_params['means']
                bestWeights = best_params['weights']
                bestConverged = best_params['converged']
                bestCategory = best_params['category']
            # With 3 components only the triple applies; with 2, stop after
            # the last pair.
            if self.n_components == 3 : break
            if self.n_components == 2 and i == 3 : break
        # NOTE(review): the fitted weights are discarded here and reset to
        # uniform before being stored — confirm this is intentional.
        bestWeights = np.tile(1.0 / self.n_components, self.n_components)
        self.covars_ = bestCovars
        self.means_ = bestMeans
        self.weights_ = bestWeights
        self.converged_ = bestConverged
        self.category = bestCategory
        return self
    ####
    def training(self, X):
        """Run EM from several scaled initializations of the anchor means and
        return (best_params, lowest_bias), where bias measures drift of the
        learned means from the anchors."""
        max_log_prob = -np.infty
        lowest_bias = np.infty
        wmin, wmax = 0.8, 1.2 # Factor intervel [wmin, wmax]
        # Restart EM n_init times, scaling the anchor means by w.
        for w in np.linspace(wmin, wmax, self.n_init):
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                #self.means_ = cluster.KMeans(n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_
                self.means_ = w * self.init_means
            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_= np.tile(1.0 / self.n_components, self.n_components)
            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                """
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape :
                    cv.shape = (1, 1)
                self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv, self.covariance_type, self.n_components)
                """
                self.covars_ = self.init_covars
            # EM algorithms
            log_likelihood = []
            # reset self.converged_ to False
            self.converged_= False
            for i in range(self.n_iter):
                # Expectation step
                curr_log_likelihood, responsibilities = self.score_samples(X)
                log_likelihood.append(curr_log_likelihood.sum())
                # Check for convergence.
                if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < self.thresh:
                    self.converged_ = True
                    break
                #Maximization step
                self._do_mstep(X, responsibilities, self.params, self.min_covar)
            # Drift of the learned means from the anchors; the outer ('1/1'
            # and '0/0') terms are signed so drift toward the middle is
            # penalized while the middle ('0/1') term uses absolute drift.
            if self.n_components == 3:
                curr_bias =(self.means_[0][0]-self.init_means[0][0])+np.abs(self.means_[1][0]-self.init_means[1][0])+(self.init_means[2][0]-self.means_[2][0])
            elif self.n_components == 2:
                curr_bias =np.abs(self.means_[0][0] - self.init_means[0][0]) + np.abs(self.init_means[1][0] - self.means_[1][0])
            elif self.n_components == 1:
                curr_bias =np.abs (self.means_[0][0] - self.init_means[0][0])
            else :
                print >> sys.stderr, '[ERROR] The companent could only between [1,3]. But yours is ', self.n_components
                sys.exit(1)
            # May flip self.converged_ to False if two labels map to the
            # same genotype.
            self.Label2Genotype()
            if w == wmin:
                # First restart: seed best_params unconditionally.
                max_log_prob = log_likelihood[-1]
                best_params = {'weights':self.weights_,
                               'means':self.means_,
                               'covars':self.covars_,
                               'converged':self.converged_,
                               'category':self.category}
                if self.converged_:
                    lowest_bias = curr_bias
            if self.converged_ and lowest_bias > curr_bias:
                max_log_prob = log_likelihood[-1]
                lowest_bias = curr_bias
                best_params = {'weights': self.weights_,
                               'means': self.means_,
                               'covars': self.covars_,
                               'converged': self.converged_,
                               'category':self.category}
        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data." )
        # if neendshift :
        #     self.covars_ = tmp_params['covars']
        #     self.means_ = tmp_params['means']
        #     self.weights_ = tmp_params['weights']
        #     self.converged_ = tmp_params['converged']
        #     self.category = tmp_params['category']
        return best_params, lowest_bias

    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """
        Perform the Mstep of the EM algorithm and return the class weihgts.
        """
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        # 10*EPS guards against division by an (almost) empty component.
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(self, X, responsibilities, weighted_X_sum, inverse_weights,min_covar)
        return weights
    """
    Here is just for genotyping process
    """
    # Decide which Gaussian mu (mean) corresponds to which genotype
    def Label2Genotype(self):
        """Map each component label to the genotype whose anchor mean is
        nearest to the learned mean; returns {label: genotype}. Sets
        self.converged_ to False if two labels collapse onto one genotype;
        if not converged, every label maps to the missing genotype './.'."""
        label2genotype = {}
        if self.converged_:
            if len(self.means_) > 3 :
                print >> sys.stderr, 'Do not allow more than 3 components. But you set', len(self.means_)
                sys.exit(1)
            for label,mu in enumerate(self.means_[:,0]):
                # Nearest-anchor assignment for this component.
                best_distance, bestIndx = np.infty, 0
                for i,m in enumerate(self.init_means[:,0]):
                    distance = np.abs(mu - m)
                    if distance < best_distance:
                        bestIndx = i
                        best_distance = distance
                label2genotype[label] = self.category[bestIndx]
            # Put False if there are more than one 'label' points to the same 'genotype'
            g2c = {v:k for k,v in label2genotype.items()}
            if len(label2genotype) != len(g2c): self.converged_ = False
        else :
            label2genotype = { label: './.' for label in range( self.n_components ) }
        return label2genotype

    def Mendel(self, genotype, sample2col, family):
        """Count Mendelian-consistent (m) and inconsistent (n) trios among
        ``family`` ({child: (parent1, parent2)}) given per-column genotype
        calls, plus the number of complete trios (num) and the set of column
        indices involved in any inconsistency."""
        ngIndx = []
        m,n,num = 0.0,0.0,0 # m is match; n is not match
        for k,v in family.items():
            #if v[0] not in sample2col or v[1] not in sample2col : continue
            # Skip trios with any member missing from the VCF columns.
            if k not in sample2col or v[0] not in sample2col or v[1] not in sample2col: continue
            if k not in sample2col :
                print >> sys.stderr, 'The sample name is not in vcf file! ', k
                sys.exit(1)
            # c1 is son; c2 and c3 are the parents
            c1,c2,c3 = genotype[ sample2col[k] ], genotype[ sample2col[v[0]] ], genotype[ sample2col[v[1]] ]
            # Skip trios with any missing genotype call.
            if c1 == './.' or c2 == './.' or c3 == './.': continue
            num += 1;
            ng = False
            # Enumerate the parental genotype combinations; the child
            # genotypes that are impossible under Mendelian inheritance
            # count as inconsistencies (ng = True).
            if c2 == c3 :
                if c2 == '0/0' or c2 == '1/1' :
                    # Homozygous x homozygous (same): child must match.
                    if c1 == c2 : m += 1
                    else :
                        n += 1
                        ng = True
                else : # c2 == '0/1' and c3 == '0/1'
                    # Het x het: any child genotype is possible.
                    m += 1
            elif c2 == '0/1' and c3 == '1/1' :
                if c1 == '0/0' :
                    n += 1
                    ng = True
                else : m += 1
            elif c2 == '0/1' and c3 == '0/0' :
                if c1 == '1/1' :
                    n += 1
                    ng = True
                else : m += 1
            elif c2 == '1/1' and c3 == '0/1' :
                if c1 == '0/0' :
                    n += 1
                    ng = True
                else : m += 1
            elif c2 == '1/1' and c3 == '0/0' :
                # Opposite homozygotes: child must be heterozygous.
                if c1 == '1/1' or c1 == '0/0':
                    n += 1
                    ng = True
                else : m += 1
            elif c2 == '0/0' and c3 == '0/1' :
                if c1 == '1/1' :
                    n += 1
                    ng = True
                else : m += 1
            elif c2 == '0/0' and c3 == '1/1' :
                # Opposite homozygotes: child must be heterozygous.
                if c1 == '0/0' or c1 == '1/1' :
                    n += 1
                    ng = True
                else : m += 1
            if ng :
                # Record all three columns of the inconsistent trio.
                ngIndx.append(sample2col[k])
                ngIndx.append(sample2col[v[0]])
                ngIndx.append(sample2col[v[1]])
        return m,n,num,set(ngIndx)
###
def log_multivariate_normal_density(X, means, covars, covariance_type='full'):
    """
    Compute the per-component log density of the samples in X.

    :param X: array-like, shape (n_samples, n_features); a 1-D input is
              treated as a column of single-feature samples
    :param means: array, shape (n_components, n_features)
    :param covars: per-component covariances in the layout required by
                   ``covariance_type``
    :param covariance_type: only 'full' is implemented in this module
    :return: array, shape (n_samples, n_components); an empty array when
             X has no elements
    :raises ValueError: if the feature dimensions of X and means disagree,
                        or if ``covariance_type`` is unsupported
    """
    X = np.asarray(X)
    if X.ndim == 1:
        X = X[:, np.newaxis]
    if X.size == 0:
        return np.array([])
    if X.shape[1] != means.shape[1]:
        # Fixed message: this is a module-level function, so the copied
        # "not compatible with self" wording was misleading.
        raise ValueError('The shape of X is not compatible with means')
    # Validate before dispatching so an unsupported value raises a
    # descriptive ValueError instead of a bare KeyError (consistent with
    # the validation performed in GMM.__init__).
    if covariance_type != 'full':
        raise ValueError('Unsupported covariance_type: %s' % covariance_type)
    return _log_multivariate_normal_density_full(X, means, covars)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""
Log probability for full covariance matrices.
"""
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probabily stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components) :
    """
    Expand one template covariance matrix into the per-component layout
    required by the given covariance type.

    :param tied_cv: template covariance matrix, shape (n_dim, n_dim)
    :param covariance_type: 'spherical', 'tied', 'diag' or 'full'
    :param n_components: number of mixture components
    :return: covariances in the requested layout
    :raises ValueError: for an unknown covariance type
    """
    if covariance_type == 'spherical':
        # One scalar variance (the template's mean) repeated per dimension.
        per_dim = tied_cv.mean() * np.ones(tied_cv.shape[1])
        return np.tile(per_dim, (n_components, 1))
    if covariance_type == 'tied':
        # All components share the template itself.
        return tied_cv
    if covariance_type == 'diag':
        # Each component keeps only the template's diagonal.
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        # Each component gets a full copy of the template.
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from <NAME>, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = | np.empty((gmm.n_components, n_features, n_features)) | numpy.empty |
'''
hi_rct_sim.py
=== Description ===
Main simulation workhorse for the Heterogeneous-Intent
Randomized Clinical Trial Recommender System
=== Parameters ===
- Unobserved Confounder Distributions: defines the priors on
UCs, U: P(U)
- True Reward Distribution: defines the causal distribution
over P(Y | do(X), U)
- Intent Distributions: define the intent distributions over
latent causes P(I | U)
- Sample Size: determines the same size of the training set,
which will be simulated from parameters above
=== Results ===
- Excel and Graphical representations of u-regret experienced
by agents in the parameterized MABUC
'''
import numpy as np
import plotly as py
import plotly.graph_objs as go
import time
import multiprocessing
from plotly import tools
from joblib import Parallel, delayed
from hi_rct_utl import *
from hi_rct_lrn import HI_RCT_Learner
from hi_rct_actor import HI_RCT_Actor
from hi_rct_agent import *
# ----------------------------------------------------------------
# Configure Simulation Parameters
# ----------------------------------------------------------------
# UC Params
P_S = np.array(
# S = 0 1
[0.5, 0.5]
)
P_R = np.array(
# R = 0 1
[0.5, 0.5]
)
P_U = [P_S, P_R]
# True Reward Params
P_TR = np.array([
# S = 0 0 1 1
# R = 0 1 0 1
[0.7, 0.8, 0.6, 0.7], # X = 0
[0.9, 0.7, 0.7, 0.5] # X = 1
])
# Actor Intent Params
P_I_A0 = np.array([
# I^{A0} = XOR(S,R)
# S = 0 0 1 1
# R = 0 1 0 1
[1.0, 0.0, 0.0, 1.0], # X = 0
[0.0, 1.0, 1.0, 0.0] # X = 1
])
P_I_A1 = np.array([
# I^{A1} = s
# S = 0 0 1 1
# R = 0 1 0 1
[1.0, 1.0, 0.0, 0.0], # X = 0
[0.0, 0.0, 1.0, 1.0] # X = 1
])
P_I_A2 = np.array([
# I^{A2} ~ s
# S = 0 0 1 1
# R = 0 1 0 1
[0.96, 0.96, 0.04, 0.04], # X = 0
[0.04, 0.04, 0.96, 0.96] # X = 1
])
P_I_A3 = np.array([
# I^{A3} ~ XOR(S,R)
# S = 0 0 1 1
# R = 0 1 0 1
[0.96, 0.04, 0.04, 0.96], # X = 0
[0.04, 0.96, 0.96, 0.04] # X = 1
])
P_I = [P_I_A0, P_I_A1, P_I_A2]
P_I_RDT = [P_I_A1, P_I_A2, P_I_A2, P_I_A2, P_I_A2, P_I_A0, P_I_A3, P_I_A3, P_I_A3, P_I_A3]
best_actor_inds = (0, 5) # Used to compare performance against "oracle" agent
U_DOM = X_DOM = Y_DOM = [0, 1]
U_COUNT = len(P_U)
I_COUNT = len(P_I)
# Sampling and MC Parameters
SIM_NAME = "_10act_iec_samp_w_cal"
N = 10000 # Training set sample size
IEC_TOL = 0.10 # IEC Difference Tolerance
CAL_SIZE = 20 # Calibration set size for relevant agents
RDC_N = 1000 # Number of MC simulations for RDC sim
RDC_T = 10000 # Number of trials per MC simulation for RDC sim
VERBOSE = False # Enables [True] / Disables reporting some features
REP_INT = 10 # Interval of sims before reporting
N_CORES = multiprocessing.cpu_count()-1
np.random.seed(0) # For reproducible results
# ----------------------------------------------------------------
# Simulation Functions
# ----------------------------------------------------------------
def gen_sample ():
    '''
    Simulate one HI-RCT training set of N units, each unit t consisting of
    [U_t, {I^{A_i}_t}+, X_t, Y_t] (UC states, actor intents, randomized
    treatment, outcome).
    '''
    # Sample every unit's unobserved-confounder state, one UC (column) at a
    # time so the RNG stream matches a batched per-UC draw.
    uc_states = np.empty((N, U_COUNT), int)
    for col, p_u in enumerate(P_U):
        uc_states[:, col] = np.random.choice(U_DOM, p=p_u, size=N)
    # Intents, the randomized treatment, and the outcome for each unit.
    itos = np.empty((N, I_COUNT + 2), int)
    for t in range(N):
        state_ind = get_dist_index(uc_states[t, :])
        # Each actor's intent is drawn conditioned on the UC state.
        for a_ind, p_i in enumerate(P_I):
            itos[t, a_ind] = np.random.choice(X_DOM, p=p_i[:, state_ind])
        # Treatment assigned uniformly at random (the RCT arm).
        treat = itos[t, I_COUNT] = np.random.choice(X_DOM)
        # Outcome drawn from the true reward distribution P(Y | do(X), U).
        itos[t, I_COUNT + 1] = np.random.choice(
            Y_DOM, p=[1 - P_TR[treat, state_ind], P_TR[treat, state_ind]])
    return np.hstack((uc_states, itos))
def run_sim (actors, agents, n):
    '''
    Execute one Monte-Carlo repetition of the deployment phase: RDC_T
    trials in which every agent acts on the same actors' intents; returns
    [per-trial regret, per-trial optimal-arm indicator], each with one row
    per agent.
    '''
    if n % REP_INT == 0:
        print(" ...starting %d / %d simulations" % (n, RDC_N))
    n_agents = len(agents)
    regret = np.zeros((n_agents, RDC_T))
    opt_hits = np.zeros((n_agents, RDC_T))
    # Reset each agent's history; agents built on HI-RCT data re-calibrate.
    for agent in agents:
        agent.clear_hist()
        agent.calibrate()
    # Pre-draw the UC state of every trial so all agents face an identical
    # test sequence within this repetition.
    uc_states = np.empty((RDC_T, len(P_U)), int)
    for col, p_u in enumerate(P_U):
        uc_states[:, col] = np.random.choice(U_DOM, p=p_u, size=RDC_T)
    for t in range(RDC_T):
        u_t = uc_states[t, :]
        u_ind = get_dist_index(u_t)
        # Oracle arm and its reward rate for this trial's UC state.
        best_arm = np.argmax(P_TR[:, u_ind])
        best_rate = P_TR[best_arm, u_ind]
        intents = [actor.get_intent(u_t) for actor in actors]
        # Every agent acts on the same intent vector and gets feedback.
        for a_ind, agent in enumerate(agents):
            arm = agent.choose(intents)
            reward = np.random.choice(Y_DOM, p=[1 - P_TR[arm, u_ind], P_TR[arm, u_ind]])
            agent.give_feedback(intents, arm, reward)
            regret[a_ind, t] += best_rate - reward
            opt_hits[a_ind, t] += int(arm == best_arm)
    return [regret, opt_hits]
def gen_graph (cum_reg, cum_opt, names, colors):
    '''
    Write an HTML report with two side-by-side panels: each agent's
    per-trial probability of choosing the optimal arm, and each agent's
    cumulative u-regret, both as functions of the trial index.
    '''
    n_agents = cum_reg.shape[0]
    trial_axis = list(range(RDC_T))
    fig = tools.make_subplots(rows=1, cols=2, subplot_titles=('Probability of Optimal Action', 'Cumulative u-Regret'))
    fig['layout']['xaxis1'].update(title='Trial', range=[0, RDC_T])
    fig['layout']['xaxis2'].update(title='Trial', range=[0, RDC_T])
    fig['layout']['yaxis1'].update(title='Probability of Optimal Action')
    fig['layout']['yaxis2'].update(title='Cumulative u-Regret')
    # Left panel: per-trial probability that the agent picked the oracle arm.
    for a in range(n_agents):
        fig.append_trace(go.Scatter(
            x=trial_axis,
            y=cum_opt[a, :],
            line=dict(color=colors[a]),
            name=names[a],
        ), 1, 1)
    # Right panel: cumulative u-regret (legend suppressed to avoid
    # duplicating the agent names already shown by the left panel).
    for a in range(n_agents):
        fig.append_trace(go.Scatter(
            x=trial_axis,
            y=cum_reg[a, :],
            line=dict(color=colors[a]),
            name="[REG]" + names[a],
            showlegend=False,
        ), 1, 2)
    py.offline.plot(fig, filename=("./plots/cum_reg" + SIM_NAME + ".html"))
# ----------------------------------------------------------------
# Simulation Workhorse
# ----------------------------------------------------------------
if __name__ == "__main__":
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # drop-in monotonic replacement.
    start_t = time.perf_counter()
    print("=== HI-RCT Simulation Beginning ===")
    # NOTE: Can be placed inside of the sim loop to generate each
    # MC iteration:
    # ----------------
    # Generate training data for this run
    complete_data = gen_sample()
    # Drop the (unobservable) UC columns for the learner's training view.
    training_data = complete_data[:,2:]
    # Train model on training set to learn IECs
    training_model = HI_RCT_Learner(complete_data, training_data, IEC_TOL, U_DOM, X_DOM, Y_DOM, VERBOSE=VERBOSE)
    # ----------------
    # Configure current run's actors
    actors = [HI_RCT_Actor(X_DOM, Y_DOM, a) for a in P_I_RDT]
    # Initialize learning agents (one entry per strategy being compared)
    agents = [
      Agent_HI_RDT(training_model, actors),
      Agent_HI_RDT_IEC_Learned(training_model, actors, IEC_TOL),
      Agent_HI_RCT_RDT_Rand(training_model, actors, IEC_TOL, CAL_SIZE),
      Agent_HI_RCT_RDT_Heur(training_model, actors, IEC_TOL, CAL_SIZE),
      Agent_HI_RDT_IEC_Given(training_model, actors, best_actor_inds),
      Agent_HI_RDT_IEC_Given_Cal(training_model, actors, best_actor_inds, IEC_TOL, CAL_SIZE),
    ]
    # Display names and line colors, index-aligned with `agents`.
    ag_names = [
      "HI-RDC-A",
      "HI-RDC-L",
      "HI-RDC-RCT-R",
      "HI-RDC-RCT-H",
      "Oracle",
      "Oracle w/ Cal",
    ]
    ag_colors = [
      ('rgb(255, 0, 0)'),
      ('rgb(0, 0, 255)'),
      ('rgb(255, 165, 0)'),
      ('rgb(255, 0, 255)'),
      ('rgb(0, 128, 0)'),
      ('rgb(112, 128, 144)'),
      ('rgb(255, 215, 0)'),
      ('rgb(128, 128, 0)')
    ]
    AG_COUNT = len(agents)
    # Record-keeping data structures across simulations
    round_reg = np.zeros((AG_COUNT, RDC_T))
    round_opt = np.zeros((AG_COUNT, RDC_T))
    cum_reg_rep = np.zeros((AG_COUNT, RDC_N))
    # MAIN WORKHORSE: run RDC_N Monte-Carlo repetitions in parallel.
    sim_results = Parallel(n_jobs=N_CORES, verbose=1)(delayed(run_sim)(actors, agents, i) for i in range(RDC_N))
    for (ind, r) in enumerate(sim_results):
        # Total regret of each agent within this repetition.
        cum_reg_rep[:, ind] = [np.sum(r[0][i, :]) for i in range(AG_COUNT)]
        round_reg += r[0]
        round_opt += r[1]
    # Reporting phase: cumulative regret per trial, averaged over reps.
    # np.cumsum replaces the previous O(T^2) per-prefix re-summation.
    cum_reg = np.cumsum(round_reg, axis=1)
    cum_reg = cum_reg / RDC_N
    cum_opt = round_opt / RDC_N
    gen_graph(cum_reg, cum_opt, ag_names, ag_colors)
    np.savetxt("./plots/cum_reg_rep" + SIM_NAME + ".csv", cum_reg_rep, delimiter=",")
    np.savetxt("./plots/cum_reg" + SIM_NAME + ".csv", cum_reg, delimiter=",")
import sys
import numpy as np
from timeit import default_timer as timer
from scipy.sparse import block_diag, coo_matrix
from common.estimator import EstimatorModel
from common.regression import max_affine_predict
from optim.quadprog import qp_solve, convert_matrix_to_qp_solver_format, QP_BACKEND__DEFAULT
class PCNLSEstimatorModel(EstimatorModel):
    """The model of PCNLS estimators.

    Holds the fitted max-affine weights (stored by the EstimatorModel base)
    together with the QP training configuration and solver diagnostics.
    """
    def __init__(
        self, weights, nqpiter, seconds,
        obj_val, proj_obj_val, max_viol, regularizer, dual_vars,
    ):
        # The base class stores the max-affine weight matrix.
        EstimatorModel.__init__(self, weights)
        # Ridge regularization parameter used during training.
        self.regularizer = regularizer
        # Number of QP iterations performed by the trainer.
        self.nqpiter = nqpiter
        # Wall-clock training time in seconds.
        self.seconds = seconds
        # QP objective value at the returned primal solution.
        self.obj_val = obj_val
        # Objective recomputed from the projected (reshaped) weights.
        self.proj_obj_val = proj_obj_val
        # Largest constraint violation of the primal solution (>= 0).
        self.max_viol = max_viol
        # Dual variables returned by the QP solver.
        self.dual_vars = dual_vars
def pcnls_train(
    X, y, partition,
    regularizer=0.0, L=None,
    backend=QP_BACKEND__DEFAULT,
    verbose=False, init_weights=None, init_dual_vars=None,
):
    """Train a PCNLS estimator by solving one quadratic program.

    :param X: data matrix (each row is a sample)
    :param y: target vector
    :param partition: partition to be induced by the trained max-affine function
    :param regularizer: ridge regularization parameter on the gradients
    :param L: maximum Lipschitz constant (as the max-norm of the gradients)
    :param backend: quadratic programming solver
    :param verbose: whether to print verbose output
    :param init_weights: warm starting weights for QP
    :param init_dual_vars: warm starting dual variables for QP
    :return: PCNLSEstimatorModel object having the results
    """
    n_samples = X.shape[0]
    assert len(y) == n_samples
    if len(y.shape) > 1:
        # Accept a column vector, flatten it for the QP formulation.
        assert len(y.shape) == 2 and y.shape[1] == 1
        y = y.ravel()
    if verbose > 0:
        print('Training PCNLS, n: {}, K: {}, d: {}, L: {}, regularizer: {}'.format(
            partition.npoints, partition.ncells, X.shape[1], L, regularizer,
        ))
    t0 = timer()
    # Assemble the QP: 0.5 w'Hw + g'w subject to A w <= b.
    H, g, A, b, cell_idx = pcnls_qp_data(
        X, y, partition,
        regularizer=regularizer, L=L,
    )
    if init_weights is not None:
        init_weights = init_weights.ravel()
    qp_res = qp_solve(
        H, g, A, b,
        x0=init_weights, y0=init_dual_vars,
        backend=backend, verbose=verbose,
    )
    primal = qp_res.primal_soln
    dual = qp_res.dual_soln
    # Diagnostics on the raw primal solution.
    max_viol = max(0.0, np.max(A.dot(primal) - b))
    obj_val = 0.5 * primal.dot(H.dot(primal)) + g.dot(primal)
    # Reshape into per-cell (intercept + gradient) rows and re-evaluate the
    # least-squares objective of the induced max-affine predictor.
    weights = np.reshape(primal, (partition.ncells, (1 + X.shape[1])))
    fitted = max_affine_predict(weights, X)
    proj_obj_val = 0.5 * (np.sum(np.square(y - fitted)) - y.dot(y))
    return PCNLSEstimatorModel(
        weights=weights,
        nqpiter=1,
        seconds=(timer() - t0),
        obj_val=obj_val,
        proj_obj_val=proj_obj_val,
        max_viol=max_viol,
        regularizer=regularizer,
        dual_vars=dual,
    )
def _add_L_to_Ab(L, K, d, A_data, A_rows, A_cols, row_idx):
"""Adding the Lipschitz constraints to the end of the constraint parameters A and b."""
d1 = d + 1
if L is not None:
for k in range(K):
col0 = k * d1 + 1
for l in range(d):
A_data += [1.0, -1.0]
A_rows += [row_idx, row_idx + 1]
row_idx += 2
col = col0 + l
A_cols += [col, col]
b = np.zeros(row_idx)
if L is not None:
b[-2*K*d:] = L
return A_data, A_rows, A_cols, row_idx, b
def pcnls_qp_data(
X, y, partition,
regularizer=0.0, L=None,
backend=QP_BACKEND__DEFAULT,
):
"""Constructing max-affine convex regression matrices for quadratic programming (QP).
QP format: 0.5*(w'*H*w) + g'*w + 0.5*regularizer*(w'*w), s.t. A*w <= b and max_i|w[i]| <= L.
:param X: data matrix (each row is a sample, without augmented leading 1s)
:param y: target vector
:param partition: induced partition by the considered max-affine functions
:param regularizer: ridge regression regularizer
:param L: maximum Lipschitz constant (as the max-norm of the gradients)
:param backend: quadratic programming solver
:return: QP parameters H, g, A, b, and the constraint row index for each cell
>>> from common.partition import singleton_partition
>>> X = np.array([[1.1, 1.1], [-1.2, 1.2], [-1.3, -1.3], [0.4, 0.4], [1.5, -1.5]])
>>> y = np.array([1.1, 1.2, 1.3, 0.4, 0.5])
>>> p = singleton_partition(len(y))
>>> H, g, A, b, cell_idx = pcnls_qp_data(X, y, p, regularizer=0.1)
>>> cell_idx
array([ 0, 4, 8, 12, 16])
>>> H.shape
(15, 15)
>>> np.linalg.matrix_rank(H.toarray())
15
>>> H.nnz
45
>>> H.toarray()[:, :9]
array([[ 1. , 1.1 , 1.1 , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 1.1 , 1.31, 1.21, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 1.1 , 1.21, 1.31, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , -1.2 , 1.2 , 0. , 0. , 0. ],
[ 0. , 0. , 0. , -1.2 , 1.54, -1.44, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1.2 , -1.44, 1.54, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. , -1.3 , -1.3 ],
[ 0. , 0. , 0. , 0. , 0. , 0. , -1.3 , 1.79, 1.69],
[ 0. , 0. , 0. , 0. , 0. , 0. , -1.3 , 1.69, 1.79],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> g
array([-1.1 , -1.21, -1.21, -1.2 , 1.44, -1.44, -1.3 , 1.69, 1.69,
-0.4 , -0.16, -0.16, -0.5 , -0.75, 0.75])
>>> A.shape
(20, 15)
>>> np.linalg.matrix_rank(A.toarray())
12
>>> A.toarray()[:, :9]
array([[-1. , -1.1, -1.1, 1. , 1.1, 1.1, 0. , 0. , 0. ],
[-1. , -1.1, -1.1, 0. , 0. , 0. , 1. , 1.1, 1.1],
[-1. , -1.1, -1.1, 0. , 0. , 0. , 0. , 0. , 0. ],
[-1. , -1.1, -1.1, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 1. , -1.2, 1.2, -1. , 1.2, -1.2, 0. , 0. , 0. ],
[ 0. , 0. , 0. , -1. , 1.2, -1.2, 1. , -1.2, 1.2],
[ 0. , 0. , 0. , -1. , 1.2, -1.2, 0. , 0. , 0. ],
[ 0. , 0. , 0. , -1. , 1.2, -1.2, 0. , 0. , 0. ],
[ 1. , -1.3, -1.3, 0. , 0. , 0. , -1. , 1.3, 1.3],
[ 0. , 0. , 0. , 1. , -1.3, -1.3, -1. , 1.3, 1.3],
[ 0. , 0. , 0. , 0. , 0. , 0. , -1. , 1.3, 1.3],
[ 0. , 0. , 0. , 0. , 0. , 0. , -1. , 1.3, 1.3],
[ 1. , 0.4, 0.4, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 0.4, 0.4, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. , 0.4, 0.4],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 1. , 1.5, -1.5, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 1.5, -1.5, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. , 1.5, -1.5],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> A.nnz
120
>>> b.shape
(20,)
>>> np.sum(np.abs(b))
0.0
>>> H, g, A, b, cell_idx = pcnls_qp_data(X, y, p, L=5.0)
>>> A.toarray()[:33, :9]
array([[-1. , -1.1, -1.1, 1. , 1.1, 1.1, 0. , 0. , 0. ],
[-1. , -1.1, -1.1, 0. , 0. , 0. , 1. , 1.1, 1.1],
[-1. , -1.1, -1.1, 0. , 0. , 0. , 0. , 0. , 0. ],
[-1. , -1.1, -1.1, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 1. , -1.2, 1.2, -1. , 1.2, -1.2, 0. , 0. , 0. ],
[ 0. , 0. , 0. , -1. , 1.2, -1.2, 1. , -1.2, 1.2],
[ 0. , 0. , 0. , -1. , 1.2, -1.2, 0. , 0. , 0. ],
[ 0. , 0. , 0. , -1. , 1.2, -1.2, 0. , 0. , 0. ],
[ 1. , -1.3, -1.3, 0. , 0. , 0. , -1. , 1.3, 1.3],
[ 0. , 0. , 0. , 1. , -1.3, -1.3, -1. , 1.3, 1.3],
[ 0. , 0. , 0. , 0. , 0. , 0. , -1. , 1.3, 1.3],
[ 0. , 0. , 0. , 0. , 0. , 0. , -1. , 1.3, 1.3],
[ 1. , 0.4, 0.4, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 0.4, 0.4, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. , 0.4, 0.4],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 1. , 1.5, -1.5, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 1.5, -1.5, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. , 1.5, -1.5],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , -1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 1. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , -1. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 1. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , -1. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 1. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , -1. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 1. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , -1. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 1. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , -1. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> b
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5.,
5., 5., 5., 5., 5., 5.])
"""
n, d = X.shape
K = partition.ncells
assert n == partition.npoints
assert n >= K, 'Too few data points, n: {}, K: {}'.format(n, K)
assert n > d, 'Too few data points, n: {}, d: {}'.format(n, d)
if y.shape == (n, 1):
y = y[:, 0]
assert y.shape == (n,), 'Invalid y.shape: {}'.format(y.shape)
X = np.insert(X, 0, 1.0, axis=1)
d1 = d+1 # bias extended input dimension
assert X.shape == (n, d1)
nvars = K*d1 # number of variables of QP
regmat = None
if regularizer > 0.0:
regmat = regularizer * np.eye(d1)
regmat[0, 0] = 0.0
H_mats = []
g_mats = []
A_data = []
A_rows = []
A_cols = []
row_idx = 0
cell_idx = []
for j, cell_j in enumerate(partition.cells):
cell_idx.append(row_idx)
cellX = X[cell_j, :]
cell_size = cellX.shape[0]
cellXX = np.dot(cellX.transpose(), cellX)
if regmat is not None:
cellXX += regmat
H_mats.append(cellXX)
g_mats.append(-np.dot(cellX.transpose(), y[cell_j]))
data = list(np.hstack((cellX, -cellX)).flatten())
row_offsets = np.kron(range(cell_size), np.ones(2*d1))
col_j = [j*d1+offset for offset in range(d1)]
for k in range(K):
if k == j:
continue
rows = [row_idx + row for row in row_offsets]
row_idx += cell_size
col_k = [k*d1+offset for offset in range(d1)]
A_data += data
A_rows += rows
A_cols += list(np.kron(col_k + col_j, | np.ones((cell_size, 1)) | numpy.ones |
import h5py
import numpy as np
import numpy.ma as ma
import numpy.lib.recfunctions as rfn
import logging
ref_region_dtype = np.dtype([('start','i8'), ('stop','i8')])
def print_ref(grp):
    '''
    Print every reference dataset (path ending in ``/ref``) found in the
    file or group, padded so the dataset descriptions line up in a column.
    '''
    found = []

    def collect(name, obj):
        # Keep only actual datasets whose path ends in "/ref".
        if name.endswith('/ref') and isinstance(obj, h5py.Dataset):
            found.append((name, obj))

    grp.visititems(collect)
    if not found:
        return
    width = max(len(name) for name, _ in found)
    for name, dset in found:
        print(name + ' ' * (width - len(name)) + ' ' + str(dset))
def print_data(grp):
    '''
    Print every data dataset (path ending in ``/data``) found in the file
    or group, padded so the dataset descriptions line up in a column.
    '''
    found = []

    def collect(name, obj):
        # Keep only actual datasets whose path ends in "/data".
        if name.endswith('/data') and isinstance(obj, h5py.Dataset):
            found.append((name, obj))

    grp.visititems(collect)
    if not found:
        return
    width = max(len(name) for name, _ in found)
    for name, dset in found:
        print(name + ' ' * (width - len(name)) + ' ' + str(dset))
def print_attr(grp):
    '''
    Print all attributes in the file (or group), one section per object,
    with attribute values aligned in a column. Reference bookkeeping
    datasets (``.../ref`` and ``.../ref_region``) are skipped.
    '''
    found = []

    def collect(name, obj):
        # Keep anything carrying attributes, except reference bookkeeping.
        if len(obj.attrs) and not (name.endswith('/ref') or name.endswith('/ref_region')):
            found.append((name, obj.attrs))

    grp.visititems(collect)
    if not found:
        return
    width = max(len(key) for _, attrs in found for key in attrs)
    for name, attrs in found:
        print(name)
        for key, val in attrs.items():
            print('\t' + key + ':' + ' ' * (width - len(key)) + ' ' + str(val))
def dereference_chain(sel, refs, data=None, regions=None, mask=None, ref_directions=None, indices_only=False):
    '''
    Load a "chain" of references. Allows traversal of multiple layers of references,
    e.g. for three datasets ``A``, ``B``, and ``C`` linked ``A->B->C``. One
    can use a selection in ``A`` and load the ``C`` data associated with it.
    Example usage::
        sel = slice(0,100)
        refs = [f['A/ref/B/ref'], f['C/ref/B/ref']]
        ref_dirs = [(0,1), (1,0)]
        data = f['C/data']
        regions = [f['A/ref/B/ref_region'], f['B/ref/C/ref_region']]
        mask = np.r_[sel] > 50
        c_data = dereference_chain(sel, refs, data, regions=regions, mask=mask, ref_directions=ref_dirs)
        c_data.shape # (100, max_a2b_assoc, max_b2c_assoc)
    :param sel: iterable of indices, a slice, or an integer, see ``sel`` argument in ``dereference``
    :param refs: a list of reference datasets to load, in order, see ``ref`` argument in ``dereference``
    :param data: a dataset to load dereferenced data from, optional if ``indices_only=True``
    :param regions: lookup table into ``refs`` for each selection, see ``region`` argument in ``dereference``
    :param mask: a boolean mask into the first selection, true will not load the entry
    :param ref_directions: intepretation of reference datasets, see ``ref_direction`` argument in ``dereference``
    :param indices_only: flag to skip loading the data and instead just return indices into the final dataset
    :returns: ``numpy`` masked array with one leading dimension per link in the chain
    '''
    # normalize selection to an index array; combine with any caller-supplied mask
    sel = np.r_[sel]
    mask = np.zeros_like(sel, dtype=bool) | (mask if mask is not None else False)
    sel = ma.array(sel, mask=mask, shrink=False)
    # output shape grows by one axis per dereference step
    shape = (len(sel),)
    dref = None
    nsteps = len(refs)
    for i in range(nsteps):
        # only fetch actual data on the last hop; intermediate hops need indices only
        dset = data if i == nsteps-1 else None
        ref = refs[i]
        ref_dir = ref_directions[i] if ref_directions else (0,1) # default to (0,1)
        reg = regions[i] if regions else None
        dref = dereference(sel.data.ravel(), ref,
            data=dset, region=reg,
            mask=mask.ravel(), ref_direction=ref_dir,
            indices_only=True if i != nsteps-1 else indices_only)
        # append the new per-entry association axis
        shape += dref.shape[-1:]
        # propagate masking: an entry is masked if its parent was masked or the
        # dereference produced a masked (padded) slot; structured ('V') dtypes
        # collapse their field masks with any()
        mask = np.expand_dims(mask, axis=-1) | \
            (rfn.structured_to_unstructured(dref.mask).any(axis=-1).reshape(shape) \
            if dref.mask.dtype.kind == 'V' else dref.mask.reshape(shape))
        dref = ma.array(dref.data.reshape(shape), mask=mask, shrink=False)
        if i != nsteps-1:
            # intermediate indices become the selection for the next hop
            sel = dref
    return dref
def dereference(sel, ref, data=None, region=None, mask=None, ref_direction=(0,1), indices_only=False, as_masked=True):
'''
Load ``data`` referred to by ``ref`` that corresponds to the desired
positions specified in ``sel``.
:param sel: iterable of indices, an index, or a ``slice`` to match against ``ref[:,ref_direction[0]]``. Return value will have same first dimension as ``sel``, e.g. ``dereference(slice(100), ref, data).shape[0] == 100``
:param ref: a shape (N,2) ``h5py.Dataset`` or array of pairs of indices linking ``sel`` and ``data``
:param data: a ``h5py.Dataset`` or array to load dereferenced data from, can be omitted if ``indices_only==True``
:param region: a 1D ``h5py.Dataset`` or array with a structured array type of [('start','i8'), ('stop','i8')]; 'start' defines the earliest index within the ``ref`` dataset for each value in ``sel``, and 'stop' defines the last index + 1 within the ``ref`` dataset (optional). If a ``h5py.Dataset`` is used, the ``sel`` spec will be used to load data from the dataset (i.e. ``region[sel]``), otherwise ``len(sel) == len(region)`` and a 1:1 correspondence is assumed
:param mask: mask off specific items in selection (boolean, True == don't dereference selection), len(mask) == len(np.r_[sel])
:param ref_direction: defines how to interpret second dimension of ``ref``. ``ref[:,ref_direction[0]]`` are matched against items in ``sel``, and ``ref[:,ref_direction[1]]`` are indices into the ``data`` array (``default=(0,1)``). So for a simple example: ``dereference([0,1,2], [[1,0], [2,1]], ['A','B','C','D'], ref_direction=(0,1))`` returns an array equivalent to ``[[],['A'],['B']]`` and ``dereference([0,1,2], [[1,0], [2,1]], ['A','B','C','D'], ref_direction=(1,0))`` returns an array equivalent to ``[['B'],['C'],[]]``
:param indices_only: if ``True``, only returns the indices into ``data``, does not fetch data from ``data``
:returns: ``numpy`` masked array (or if ``as_masked=False`` a ``list``) of length equivalent to ``sel``
'''
# set up selection
sel_mask = mask
sel_idcs = np.r_[sel][~sel_mask] if sel_mask is not None else np.r_[sel]
n_elem = len(sel_idcs) if sel_mask is None else len(sel_mask)
return_dtype = data.dtype if not indices_only else ref.dtype
if not len(sel_idcs) and n_elem:
# special case for if there is nothing selected in the mask
if as_masked:
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True, shrink=False)
else:
return [np.empty(0, data.dtype) for _ in range(n_elem)]
elif not len(sel_idcs):
if as_masked:
return ma.array(np.empty((0,1), dtype=return_dtype), mask=True, shrink=False)
else:
return []
# load fast region lookup
if region is not None:
if isinstance(region, h5py.Dataset):
if isinstance(sel, slice):
region = region[sel] # load parent reference region information
else:
region_offset = np.min(sel_idcs)
region_sel = slice(region_offset, int(np.max(sel_idcs)+1))
region = region[region_sel][sel_idcs - region_offset]
else:
region = region[sel_idcs]
# load relevant references
region_valid = region['start'] != region['stop'] if region is not None else None
if not region is None and np.count_nonzero(region_valid) == 0:
# special case for if there are no valid references
if as_masked:
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True)
else:
return [np.empty(0, return_dtype) for _ in range(n_elem)]
ref_offset = np.min(region[region_valid]['start']) if region is not None else 0
ref_sel = slice(ref_offset, int(np.max(region[region_valid]['stop']))) if region is not None else slice(ref_offset,len(ref))
ref = ref[ref_sel]
# if no valid references, return
if len(ref) == 0:
if as_masked:
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True)
else:
return [np.empty(0, return_dtype) for _ in range(n_elem)]
# load relevant data
dset_offset = np.min(ref[:,ref_direction[1]])
dset_sel = slice(dset_offset, int(np.max(ref[:,ref_direction[1]])+1))
dset = data[dset_sel] if not indices_only else None # load child dataset region
# create a region array, if one was not given
if region is None:
region = np.zeros(len(sel_idcs), dtype=ref_region_dtype)
region['start'] = ref_sel.start
region['stop'] = ref_sel.stop
if not as_masked:
# dump into list using subregion masks
if indices_only:
indices = [
ref[st:sp,ref_direction[1]][ (ref[st:sp,ref_direction[0]] == i) ]
for i,st,sp in zip(sel_idcs, region['start']-ref_offset, region['stop']-ref_offset)
]
return indices
else:
data = [
dset[ref[st:sp,ref_direction[1]][ (ref[st:sp,ref_direction[0]] == i) ] - dset_offset]
for i,st,sp in zip(sel_idcs, region['start']-ref_offset, region['stop']-ref_offset)
]
return data
# the rest of this is index manipulation to convert from sel -> ref -> data
# first using only the unique references and then casting it back into the
# original selection
# first get mapping from unique selection back into the selection
uniq_sel, uniq_inv = np.unique(sel_idcs, return_inverse=True)
# only use references that are relevant to the selection
ref_mask = np.isin(ref[:,ref_direction[0]], uniq_sel)
if not np.any(ref_mask):
# special case if no valid references for selection
return ma.array( | np.empty((n_elem,1), dtype=return_dtype) | numpy.empty |
import numpy as np
import scipy.ndimage as ndi
import SimpleITK
import warnings
from skimage.measure import regionprops
import cv2
CORRECTION = 255
def nodule_size(nodule):
    """Return (width, height) of the bounding box of the labeled region in
    ``nodule``; (0.0, 0.0) when the mask contains no region.

    If several regions are present, the box of the last one wins.
    """
    # Binarize: every non-zero voxel becomes label 255.
    mask = nodule.copy()
    mask[mask != 0] = 255
    mask = mask.astype(int)
    width, height = 0.0, 0.0
    for region in regionprops(mask):
        row_lo, col_lo, row_hi, col_hi = region.bbox
        width, height = row_hi - row_lo, col_hi - col_lo
    return width, height
def get_nodule_diameter(seg_image):
    """Return the largest bounding-box side of the nodule segmentation,
    measured on the mean projection of ``seg_image`` along axis 1.

    If several regions are present, the diameter of the last one is
    returned (original behavior preserved).  Returns 0 when the
    segmentation is empty (the previous code raised ``NameError`` here).
    """
    projection = np.mean(seg_image, axis=1)
    projection[projection != 0] = 255
    projection = projection.astype(int)
    diameter = 0  # safe default: no region found
    for p in regionprops(projection):
        min_row, min_col, max_row, max_col = p.bbox
        diameter = max(max_row - min_row, max_col - min_col)
    return diameter
def generate_2d(X_ct, p_lambda=0.85):
    '''
    Generate a 2D digitally reconstructed radiograph (DRR, fake/simulated CXR)
    from a CT scan.

    X_ct: CT volume in Hounsfield-like units; projected along axis 1.
    p_lambda: controls the boosting of X-ray absorption as tissue density
        increases (0.85 chosen after visual comparison with real chest X-rays).

    Returns the mean of exp(scaled attenuation) over axis 1.
    Note: unlike the previous implementation, the input array is NOT
    modified in place.
    '''
    # Work on a float copy so the caller's volume is not clobbered.
    ct = np.clip(np.asarray(X_ct, dtype=np.float64), -500, 400)
    # Shift to a non-negative range, rescale, and boost absorption.
    ct = (ct + 1024) / 1000.0 * p_lambda
    # Saturate attenuation at 1 before exponentiating.
    np.clip(ct, None, 1, out=ct)
    return np.mean(np.exp(ct), axis=1)
def resample(image, voxel_spacing, new_spacing=None, new_shape=None, order=1):
""" Resamples the scan according to the either new spacing or new shape
When new_spacing and new_shape are provided, new_shape has the priority
use order = 1 for nearest neighbor and order = 3 for cubic interpolation
@author: <NAME>+ <NAME>
"""
assert new_spacing is not None or new_shape is not None
if np.dtype(image[0, 0, 0]) is np.dtype(np.int16) and np.min(
image) < 0 and np.max(image) > 50 and order == 1:
warnings.warn(
"Order 1 selected for image that looks as a scan, try using order 3"
)
if np.dtype(image[0, 0, 0]) in [
np.dtype(np.uint8), np.dtype(np.int16)
] and np.min(image) == 0 and | np.max(image) | numpy.max |
#!/usr/bin/env python
import numpy as np
from time import time
import pyfftw
from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2
from scipy.special import jv as besselj
import finufftpy
def translations_brute_force(Shathat, Mhat, cmul_trans):
    """Cross-correlate images against templates over all given translations.

    Applies the translation phase factors ``cmul_trans`` to the image
    transforms ``Mhat``, FFTs over the angular axis in place (pyfftw),
    contracts against the conjugated template coefficients, and inverse-FFTs
    to obtain correlations for every rotation angle.

    Returns ``(c_n2, ptm, tm)``: the real correlation table with axes
    (γ, te, im, tr), the precomputation time, and the contraction time.
    """
    # Shathat: (q, te, k)
    # Mhat: (im, k × γ)
    # cmul_trans: (tr, k × γ)
    n_trans = cmul_trans.shape[-2]
    n_images = Mhat.shape[-2]
    Shathat = Shathat.transpose((2, 0, 1))
    # Shathat: (q, te, k)
    n_templates = Shathat.shape[-2]
    ngridr = Shathat.shape[-1]
    n_gamma = Shathat.shape[-3]
    Mhat = Mhat.reshape((n_images, ngridr, n_gamma))
    cmul_trans = cmul_trans.reshape((n_trans, ngridr, n_gamma))
    # Mhat: (im, k, γ)
    # cmul_trans: (tr, k, γ)
    Mhat = Mhat[:, np.newaxis, :, :]
    cmul_trans = cmul_trans[np.newaxis, :, :, :]
    # Mhat: (im, 1, k, γ)
    # cmul_trans: (1, tr, k, γ)
    # .copy() gives contiguous arrays so the broadcasted multiply below is fast
    Mhat = Mhat.transpose((3, 2, 0, 1)).copy()
    cmul_trans = cmul_trans.transpose((3, 2, 0, 1)).copy()
    # Mhat: (γ, k, im, 1)
    # cmul_trans: (γ, k, 1, tr)
    # Aligned buffer is both input and output of the in-place FFTW plan.
    Mhat_trans = pyfftw.empty_aligned((n_gamma, ngridr, n_images, n_trans),
                                      dtype='complex128')
    # Mhat_trans: (γ, k, im × tr)
    plan = pyfftw.FFTW(Mhat_trans, Mhat_trans, axes=(0,),
        direction='FFTW_FORWARD', flags=('FFTW_ESTIMATE',), threads=12)
    tmr_start = time()
    # broadcasted (im, 1) x (1, tr) multiply writes directly into the FFT buffer
    np.multiply(Mhat, cmul_trans, out=Mhat_trans)
    plan()
    Mhathat_trans = Mhat_trans.reshape((n_gamma, ngridr, n_images * n_trans))
    # Mhathat_trans: (q, k, im × tr)
    ptm = time() - tmr_start
    tmr_start = time()
    c_n2 = np.zeros((n_gamma, n_templates, n_images*n_trans),
                    dtype=np.complex128)
    # c_n2: (q, te, im × tr)
    for k1 in range(n_gamma):
        # k1p re-centers the angular frequency index (fftshift offset)
        k1p = (k1 + n_gamma // 2) % n_gamma
        c_n2[k1, :, :] = np.matmul(np.conj(Shathat[k1p, :, :]), Mhathat_trans[k1, :, :])
    c_n2 = 2 * np.pi * c_n2
    # inverse FFT over q recovers correlations as a function of rotation γ
    c_n2 = ifft(c_n2, axis=0)
    # c_n2: (γ, te, im × tr)
    c_n2 = c_n2.reshape((n_gamma, n_templates, n_images, n_trans))
    c_n2 = np.real(c_n2)
    # c_n2: (γ, te, im, tr)
    tm = time() - tmr_start
    return c_n2, ptm, tm
def translations_brute_force_batch(Shathat, Mhat, pf_grid, tr_grid, n_psi,
                                   n_batch_im=None, n_batch_trans=500):
    """Batched driver for ``translations_brute_force``.

    Splits the image and translation sets into batches to bound memory use,
    builds the translation phase factors per batch, and assembles the full
    correlation table.

    Returns ``(zprods1, precomp1, tm1)``: correlations with axes
    (im, te, γ, tr), accumulated precompute time, and accumulated
    contraction time.
    """
    n_templates = Shathat.shape[0]
    n_images = Mhat.shape[0]
    trans = tr_grid['trans']
    n_trans = tr_grid['n_trans']
    if n_batch_im is None:
        # default: all images in a single batch
        n_batch_im = n_images
    n_batch_trans = min(n_batch_trans, n_trans)
    zprods1 = np.zeros((n_psi, n_templates, n_images, n_trans))
    # zprods1: (γ, te, im, tr)
    tm1 = 0
    precomp1 = 0
    for cn in range(0, n_images, n_batch_im):
        idx_im = range(cn, min(cn + n_batch_im, n_images))
        for ttt in range(0, n_trans, n_batch_trans):
            idx_trans = range(ttt, min(ttt + n_batch_trans, n_trans))
            # phase factors for the negated shifts of this translation batch
            cmul_trans = pft_phase_shift(-trans[idx_trans, :], pf_grid)
            # cmul_trans: (tr, k × γ)
            tmp, ptm, tm = translations_brute_force(
                Shathat, Mhat[idx_im, :], cmul_trans)
            # scatter the batch result into the full table
            zprods1[np.ix_(range(n_psi),
                           range(n_templates),
                           idx_im,
                           idx_trans)] = tmp
            precomp1 += ptm
            tm1 += tm
    # reorder to (im, te, γ, tr)
    zprods1 = zprods1.transpose((2, 1, 0, 3))
    return zprods1, precomp1, tm1
def svd_decomposition_alignment(SSS, Mhat, n_bessel, all_rnks, BigMul_left):
    """Image/template alignment via the factored (SVD/FTK-style) expansion.

    Contracts the precomputed template factors ``SSS`` with angularly shifted
    image transforms, then multiplies by ``BigMul_left`` to evaluate all
    translations at once.

    :param SSS: template-side factors, axes (rank-total, q, te, k)
    :param Mhat: image transforms, with images along axis -2
    :param n_bessel: maximum Bessel order qp in [-n_bessel, n_bessel]
    :param all_rnks: per-order SVD ranks (length 2*n_bessel+1)
    :param BigMul_left: translation-side factor with n_trans columns
    :returns: ``(zprods, precomp2, tm2)`` — correlations with axes
        (im, te, γ, tr) plus precompute and compute timings
    """
    ngridr = SSS.shape[-1]
    n_templates = SSS.shape[-2]
    n_gamma = SSS.shape[-3]
    n_images = Mhat.shape[-2]
    n_trans = BigMul_left.shape[-1]
    tmr_start = time()
    Mhathat = Mhat.reshape((n_images, ngridr, n_gamma))
    # centered angular Fourier coefficients of each image
    Mhathat = fftshift(fft(Mhathat, axis=-1), axes=-1) / n_gamma
    # MMM[im, qp+n_bessel] holds the image coefficients shifted by order qp
    MMM = np.zeros((n_images, 2 * n_bessel + 1, ngridr, n_gamma),
                   dtype=np.complex128)
    for im in range(n_images):
        for qp in range(-n_bessel, n_bessel + 1):
            tmp = Mhathat[im, :, :]
            MMM[im, qp + n_bessel, :, :] = np.roll(tmp, -qp, axis=-1)
    MMM = MMM.transpose((1, 3, 2, 0)).copy()
    precomp2 = time() - tmr_start
    tmr_start = time()
    # contract template factors with shifted image coefficients, per order/rank
    BigMul_right = np.zeros((sum(all_rnks), n_gamma, n_templates, n_images),
                            dtype=np.complex128)
    for qp in range(-n_bessel, n_bessel + 1):
        rnk = all_rnks[qp + n_bessel]
        ofst = sum(all_rnks[:qp + n_bessel])  # row offset of this order's block
        for ll in range(rnk):
            for q in range(n_gamma):
                tmp = np.matmul(SSS[ofst + ll, q, :, :],
                                MMM[qp + n_bessel, q, :, :])
                BigMul_right[ofst + ll, q, :, :] = tmp
    BigMul_right = BigMul_right.transpose((3, 2, 1, 0)).copy()
    # apply the translation-side factor for every image/template pair
    c_n = np.zeros((n_images, n_templates, n_gamma, n_trans),
                   dtype=np.complex128)
    for im in range(n_images):
        for tt in range(n_templates):
            c_n[im, tt, :, :] = np.matmul(BigMul_right[im, tt, :, :],
                                          BigMul_left)
    c_n = 2 * np.pi * c_n
    # back to the rotation-angle domain
    zprods = ifft(ifftshift(c_n, axes=-2), axis=-2) * n_gamma
    tm2 = time() - tmr_start
    return zprods, precomp2, tm2
def cartesian_to_pft(templates, T, pf_grid):
    """Evaluate Cartesian-grid templates on the polar Fourier grid.

    Uses a type-2 NUFFT to sample each template's Fourier transform at the
    polar grid nodes (pf_grid['wx'], pf_grid['wy']).

    :param templates: array of shape (n_templates, N, N)
    :param T: physical side length of the image domain (pixel size is T/N)
    :param pf_grid: polar grid dict from ``make_tensor_grid``
    :returns: complex array of shape (n_templates, ngridr * n_psi)
    """
    xnodesr = pf_grid['xnodesr']
    n_psi = pf_grid['n_psi']
    ngridr = xnodesr.shape[0]
    n_templates = templates.shape[0]
    N = templates.shape[1]
    dx = T / N
    dy = T / N
    wx = pf_grid['wx']
    wy = pf_grid['wy']
    Shat = np.zeros((n_templates, ngridr * n_psi), dtype=np.complex128)
    upsampfac = 1.25
    # output buffer reused across templates
    fcc = np.empty(len(wx), dtype=np.complex128)
    for k in range(n_templates):
        template = templates[k, :, :]
        # Need to force Fortran ordering because that's what the FINUFFT
        # interface expects.
        gg = np.asfortranarray(template.transpose((1, 0)))
        isign = -1
        eps = 1e-6
        # Note: Crashes if gg is a 1D vector (raveled). Why?
        finufftpy.nufft2d2(wx * dx, wy * dy, fcc,
                           isign, eps, gg, upsampfac=upsampfac)
        Shat[k, :] = fcc
    return Shat
def pft_to_cartesian(Shat, T, N, pf_grid):
    """Invert ``cartesian_to_pft``: reconstruct N×N real images from polar
    Fourier samples using a type-1 NUFFT with quadrature weights.

    :param Shat: complex array of shape (n_templates, ngridr * n_psi)
    :param T: physical side length of the image domain
    :param N: output grid size per side
    :param pf_grid: polar grid dict from ``make_tensor_grid``
    :returns: real array of shape (n_templates, N, N)
    """
    xnodesr = pf_grid['xnodesr']
    n_psi = pf_grid['n_psi']
    quad_wts = pf_grid['quad_wts']
    ngridr = xnodesr.shape[0]
    n_templates = Shat.shape[0]
    dx = T / N
    dy = T / N
    wx = pf_grid['wx']
    wy = pf_grid['wy']
    templates1 = np.zeros((n_templates, N, N))
    # Again, Fortran ordering is necessary for FINUFFT.
    gxx = np.empty((N, N), dtype=np.complex128, order='F')
    upsampfac = 1.25
    for k in range(n_templates):
        # weight the samples so the NUFFT approximates the inverse transform
        fcc1 = Shat[k, :] * quad_wts
        isign = 1
        eps = 1e-6
        finufftpy.nufft2d1(wx * dx, wy * dy, fcc1, isign, eps, N, N, gxx,
                           upsampfac=upsampfac)
        # normalization of the 2D inverse Fourier integral
        gxx = gxx*dx*dy/(4*np.pi**2)
        templates1[k, :, :] = np.real(gxx.transpose((1, 0)))
    return templates1
def rotate_pft(fcc, rgamma, pf_grid):
    """Rotate polar-Fourier data in-plane.

    Each row of ``fcc`` holds ring-major samples (ngridr rings of n_psi
    angular points).  Rotation by ``rgamma[i]`` is applied to row ``i`` as a
    per-ring phase ramp in the angular Fourier domain.
    """
    n_psi = pf_grid['n_psi']
    n_rings = pf_grid['xnodesr'].shape[0]
    # Angular frequencies in FFT ordering (all rings share n_psi samples).
    freqs = ifftshift(np.arange(-n_psi / 2, n_psi / 2))
    # Phase ramp per row: multiplying coefficient q by exp(-i q γ) rotates by γ.
    phase = np.exp(-1j * freqs * rgamma[:, np.newaxis])
    rotated = np.zeros(fcc.shape, dtype=np.complex128)
    for ring in range(n_rings):
        lo, hi = ring * n_psi, (ring + 1) * n_psi
        rotated[:, lo:hi] = ifft(fft(fcc[:, lo:hi]) * phase)
    return rotated
def pft_phase_shift(sh, pf_grid):
    """Plane-wave phase factors exp(-i k·s) for each shift in ``sh``.

    ``sh`` has shape (n_shifts, 2); the result has shape
    (n_shifts, len(pf_grid['all_r'])).
    """
    psi = pf_grid['all_psi']
    radii = pf_grid['all_r']
    sx = sh[:, 0][:, np.newaxis]
    sy = sh[:, 1][:, np.newaxis]
    # projection of each shift onto the unit direction of every grid node
    dot = sx * np.cos(psi) + sy * np.sin(psi)
    return np.exp(-1j * radii * dot)
def translate_pft(fcc, sh, pf_grid):
    """Translate polar-Fourier data by the shifts ``sh`` via phase modulation."""
    return fcc * pft_phase_shift(sh, pf_grid)
def pft_norm(Mhat, pf_grid):
    """Quadrature-weighted L2 norm of polar-Fourier data along the last axis."""
    weighted_power = np.abs(Mhat) ** 2 * pf_grid['quad_wts']
    return np.sqrt(weighted_power.sum(axis=-1))
def pft_to_fb(Shat, pf_grid):
    """Convert polar-Fourier samples to centered angular-Fourier coefficients,
    quadrature-weighted and normalized by 2π.

    Input shape (n_templates, ngridr * n_psi); output (n_templates, ngridr, n_psi)
    with angular frequency q = 0 in the middle of the last axis.
    """
    ngridr, n_psi = pf_grid['ngridr'], pf_grid['n_psi']
    wts = pf_grid['quad_wts'].reshape((ngridr, n_psi))
    coeffs = Shat.reshape((Shat.shape[0], ngridr, n_psi))
    # Angular FFT, centered so the zero frequency sits in the middle.
    coeffs = np.fft.fftshift(np.fft.fft(coeffs, axis=-1), axes=-1)
    # Apply quadrature weights and the 2π normalization.
    return coeffs * wts[np.newaxis, :, :] / (2 * np.pi)
def make_tensor_grid(rmax, ngridr, n_psi):
    """Build a ring-major tensor-product polar grid.

    Radii are dr, 2dr, ..., rmax with dr = rmax/ngridr; each ring carries
    n_psi equispaced angles.  Returns a dict with the node coordinates
    (wx, wy), per-node radius/angle arrays, and trapezoid quadrature weights.
    """
    dr = rmax / ngridr
    xnodesr = dr * np.arange(1, ngridr + 1)
    weights = dr * np.ones(ngridr)
    psi = 2 * np.pi / n_psi * np.arange(n_psi)
    # Per-node angle and radius, ring-major ordering.
    all_psi = np.ravel(np.repeat(psi[np.newaxis, :], ngridr, axis=0))
    all_r = np.ravel(np.repeat(xnodesr[:, np.newaxis], n_psi, axis=1))
    # Quadrature weight r * dr * dθ, constant within a ring.
    ring_wts = (2 * np.pi / n_psi) * xnodesr * weights
    quad_wts = np.ravel(np.repeat(ring_wts[:, np.newaxis], n_psi, axis=-1))
    # Cartesian coordinates of every node.
    wx = all_r * np.cos(all_psi)
    wy = all_r * np.sin(all_psi)
    return {
        'rmax': rmax,
        'ngridr': ngridr,
        'n_psi': n_psi,
        'xnodesr': xnodesr,
        'all_psi': all_psi,
        'all_r': all_r,
        'quad_wts': quad_wts,
        'wx': wx,
        'wy': wy,
    }
def make_adaptive_grid(delta_range, dx, oversampling):
    """Build a polar grid of 2D translations whose angular sampling density
    grows with the shift magnitude.

    Radii step by dx/oversampling up to delta_range (the 1e-10 fudge keeps
    the endpoint); each radius delta gets oversampling*ceil(2π·delta/dx)
    equispaced angles.
    """
    all_delta = dx / oversampling * np.arange(oversampling * delta_range + 1e-10)
    n_delta = all_delta.shape[0]
    # number of angular samples per radius (0 for delta == 0)
    n_omega = oversampling * np.int32(np.ceil(2 * np.pi / dx * all_delta))
    n_trans = np.sum(n_omega)
    trans = np.zeros((n_trans, 2))
    offset = 0
    for delta, n_om in zip(all_delta, n_omega):
        angles = 2 * np.pi * np.arange(n_om) / n_om
        trans[offset:offset + n_om, 0] = delta * np.cos(angles)
        trans[offset:offset + n_om, 1] = delta * np.sin(angles)
        offset += n_om
    return {
        'all_delta': all_delta,
        'n_delta': n_delta,
        'n_omega': n_omega,
        'n_trans': n_trans,
        'trans': trans,
    }
def make_cartesian_grid(delta_range, dx, oversampling):
    """Build a uniform Cartesian grid of 2D translations.

    The grid has 2*oversampling*delta_range points per side, spaced dx,
    centered just below the origin.  Columns of 'trans' are (x, y) with the
    x coordinate varying fastest.
    """
    n_side = 2 * oversampling * delta_range
    coords = dx * np.arange(-n_side // 2, n_side // 2)
    yy, xx = np.meshgrid(coords, coords, indexing='ij')
    trans = np.stack((xx, yy), axis=-1).reshape((n_side ** 2, 2))
    return {'n_trans': n_side ** 2, 'trans': trans}
def extract_alignments(inner_prods3, tr_grid):
    """Recover the best template index, 2D shift, and in-plane rotation per
    image from a table of inner products with axes (im, te, γ, tr)."""
    n_images, n_templates, n_psi, n_trans = inner_prods3.shape
    trans = tr_grid['trans']
    flat = inner_prods3.reshape((n_images, n_templates * n_psi * n_trans))
    best = flat.argmax(axis=-1)
    est_template_ind = np.zeros(n_images, dtype=np.int32)
    est_trans = np.zeros((n_images, 2))
    est_gamma = np.zeros(n_images)
    for im, flat_idx in enumerate(best):
        te_i, psi_i, tr_i = np.unravel_index(
            flat_idx, (n_templates, n_psi, n_trans))
        est_template_ind[im] = te_i
        est_trans[im] = trans[tr_i]
        est_gamma[im] = psi_i * 2 * np.pi / n_psi
    return est_template_ind, est_trans, est_gamma
def rotations_brute_force(fimages, Shat, n_gamma, pf_grid, Nfine):
    """Correlate images against all in-plane rotations of the templates.

    Rotates the polar-Fourier templates in the angular-frequency domain,
    maps them back to a Cartesian grid with a type-1 NUFFT, and evaluates
    translation correlations via zero-padded FFTs on an Nfine×Nfine grid.

    :param fimages: 2D FFTs of the images, shape (n_images, N, N)
    :param Shat: polar-Fourier templates, shape (n_templates, ngridr, ngridp)
        (a single template may be passed as a 2D array)
    :param n_gamma: number of rotation angles to evaluate
    :param pf_grid: polar grid dict from ``make_tensor_grid``
    :param Nfine: fine grid size for sub-pixel translation sampling
    :returns: ``(inner_prods, precomp, comp)`` — correlations with axes
        (te, im, γ, try, trx) plus precompute and compute timings
    '''
    """
    eval_results = False
    if Shat.ndim == 2:
        # promote a single template to a batch of one
        Shat = Shat[np.newaxis, :, :]
    n_images, N, _ = fimages.shape
    n_templates, ngridr, ngridp = Shat.shape
    quad_wts_sq = pf_grid['quad_wts'].reshape((ngridr, ngridp))
    wx = pf_grid['wx']
    wy = pf_grid['wy']
    all_gamma = 2 * np.pi / n_gamma * np.arange(n_gamma)
    tmr_start = time()
    Shathat = fft(Shat) / ngridp
    # Shat: (te, k, γ)
    # Shathat: (te, k, q)
    Shathat = Shathat.reshape((n_templates, 1, ngridr, ngridp))
    # Shathat: (te, 1, k, q)
    # phase ramp exp(-i q γ) rotates each template by γ in the angular domain
    wth = ifftshift(np.arange(-ngridp / 2, ngridp / 2))
    mul = np.exp(-1j * wth[np.newaxis, :] * all_gamma[:,np.newaxis])
    # mul: (γ, q)
    Shathat_rot = Shathat * mul[:, np.newaxis, :]
    # Shathat_rot: (te, γ, k, q)
    # NOTE: This can be sped up by using PyFFTW. However, for the execution to
    # be efficent, the plan must be created using FFTW_MEASURE, which takes a
    # long time. The solution will be to separate this our to the BFR
    # “planning” stage for some fixed number of images–template pairs, then
    # loop over these, computing the IFFT batchwise at execution (since the
    # exact number of pairs is not known as planning time).
    Shat_rot = ifft(Shathat_rot)
    # quadrature-weighted samples feed the type-1 NUFFT back to Cartesian
    fx1 = quad_wts_sq * Shat_rot
    T = 2
    dx = dy = T / N
    templates_rot = np.empty((N, N, n_gamma, n_templates),
                             dtype=np.complex128, order='F')
    upsampfac = 1.25
    isign = 1
    eps = 1e-2
    finufftpy.nufft2d1many(wx * dx, wy * dy, fx1, isign, eps, N, N,
                           templates_rot, upsampfac=upsampfac)
    templates_rot = templates_rot / (4 * np.pi ** 2)
    # templates_rot: (trx, try, γ, te)
    templates_rot = templates_rot.transpose((3, 2, 1, 0)).copy()
    # templates_rot: (te, γ, try, trx)
    ftemplates_rot = fft2(ifftshift(templates_rot, axes=(-2, -1)))
    # ftemplates_rot: (te, γ, trky, trkx)
    precomp = time() - tmr_start
    tmr_start = time()
    ftemplates_rot = ftemplates_rot[:, np.newaxis, :, :, :]
    # ftemplates_rot: (te, im, γ, trky, trkx)
    # cross-power spectrum of every image/rotated-template pair
    fxx = fimages[:, np.newaxis, :, :] * np.conj(ftemplates_rot)
    # ftemplates_rot: (te, im, γ, trky, trkx)
    # zero-pad in frequency (copy the four corner quadrants) so the inverse
    # FFT samples translations on the finer Nfine grid
    inner_prods = pyfftw.zeros_aligned((n_templates, n_images, n_gamma, Nfine, Nfine), dtype='complex128')
    inner_prods[:, :, :, :N // 2, :N // 2] = fxx[:, :, :, :N // 2, :N // 2]
    inner_prods[:, :, :, :N // 2, -N // 2:] = fxx[:, :, :, :N // 2, -N // 2:]
    inner_prods[:, :, :, -N // 2:, :N // 2] = fxx[:, :, :, -N // 2:, :N // 2]
    inner_prods[:, :, :, -N // 2:, -N // 2:] = fxx[:, :, :, -N // 2:, -N // 2:]
    plan = pyfftw.FFTW(inner_prods, inner_prods, axes=(-2, -1),
                       direction='FFTW_BACKWARD',
                       flags=('FFTW_MEASURE',), threads=12)
    plan()
    inner_prods = np.real(inner_prods)
    # rescale for the change of grid size in the inverse transform
    inner_prods *= (Nfine / N) ** 2
    # inner_prods: (te, im, γ, try, trx)
    comp = time() - tmr_start
    return inner_prods, precomp, comp
def calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid):
all_UU = [None] * (2 * n_bessel + 1)
all_SSVV = [None] * (2 * n_bessel + 1)
all_rnks = np.zeros(2 * n_bessel + 1, dtype=np.int32)
xnodesr = pf_grid['xnodesr']
all_delta = tr_grid['all_delta']
n_delta = tr_grid['n_delta']
n_omega = tr_grid['n_omega']
n_trans = tr_grid['n_trans']
for qp in range(-n_bessel, n_bessel + 1):
J_n = besselj(qp, -all_delta[:, np.newaxis] * xnodesr[np.newaxis, :])
U, S, Vh = | np.linalg.svd(J_n) | numpy.linalg.svd |
'''
Utility functions to analyze particle data.
@author: <NAME> <<EMAIL>>
Units: unless otherwise noted, all quantities are in (combinations of):
mass [M_sun]
position [kpc comoving]
distance, radius [kpc physical]
velocity [km / s]
time [Gyr]
'''
# system ----
from __future__ import absolute_import, division, print_function # python 2 compatability
import numpy as np
from numpy import Inf
# local ----
from . import basic as ut
from . import halo_property
from . import orbit
from . import catalog
#===================================================================================================
# utilities - parsing input arguments
#===================================================================================================
def parse_species(part, species):
    '''
    Parse input list of species to ensure all are in catalog.

    Parameters
    ----------
    part : dict : catalog of particles
    species : str or list : name[s] of particle species to analyze

    Returns
    -------
    species : list : name[s] of particle species
    '''
    Say = ut.io.SayClass(parse_species)

    if np.isscalar(species):
        species = [species]

    # expand the shorthand species groups
    if species in (['all'], ['total']):
        species = list(part.keys())
    elif species == ['baryon']:
        species = ['gas', 'star']

    # drop any species that the particle catalog does not contain
    for spec_name in tuple(species):
        if spec_name not in part:
            species.remove(spec_name)
            Say.say('! {} not in particle catalog'.format(spec_name))

    return species
def parse_indices(part_spec, part_indices):
    '''
    Parse input list of particle indices.
    If none, generate via arange.

    Parameters
    ----------
    part_spec : dict : catalog of particles of given species
    part_indices : array-like : indices of particles

    Returns
    -------
    part_indices : array : indices of particles
    '''
    if part_indices is None or len(part_indices) == 0:
        # no input indices: default to all particles, sized from the first
        # available property in priority order
        for prop, get_count in (('position', lambda a: a.shape[0]),
                                ('id', lambda a: a.size),
                                ('mass', lambda a: a.size)):
            if prop in part_spec:
                part_indices = ut.array.get_arange(get_count(part_spec[prop]))
                break

    return part_indices
def parse_property(parts_or_species, property_name, property_values=None, single_host=True):
    '''
    Get property values, either input or stored in particle catalog.
    List-ify as necessary to match input particle catalog.

    Parameters
    ----------
    parts_or_species : dict or string or list thereof :
        catalog[s] of particles or string[s] of species
    property_name : str : options: 'center_position', 'center_velocity', 'indices'
    property_values : float/array or list thereof : property values to assign
    single_host : bool : use only the primary host (if not input any property_values)

    Returns
    -------
    property_values : float or list
    '''
    def parse_property_single(part_or_spec, property_name, property_values, single_host):
        # resolve property values for one catalog, falling back to the
        # host positions/velocities stored on the catalog object
        if property_name in ['center_position', 'center_velocity']:
            if property_values is None or not len(property_values):
                if property_name == 'center_position':
                    property_values = part_or_spec.host_positions
                elif property_name == 'center_velocity':
                    # default to the primary host
                    property_values = part_or_spec.host_velocities
                if property_values is None or not len(property_values):
                    raise ValueError('no input {} and no {} in input catalog'.format(
                        property_name, property_name))
                if single_host:
                    property_values = property_values[0]  # use only the primary host
        if isinstance(property_values, list):
            raise ValueError('input list of {}s but input single catalog'.format(property_name))
        return property_values

    assert property_name in ['center_position', 'center_velocity', 'indices']

    if isinstance(parts_or_species, list):
        # input list of particle catalogs
        # broadcast a single (or missing) value across all catalogs
        if (property_values is None or not len(property_values) or
                not isinstance(property_values, list)):
            property_values = [property_values for _ in parts_or_species]
        if len(property_values) != len(parts_or_species):
            raise ValueError('number of input {}s not match number of input catalogs'.format(
                property_name))
        for i, part_or_spec in enumerate(parts_or_species):
            property_values[i] = parse_property_single(
                part_or_spec, property_name, property_values[i], single_host)
    else:
        # input single particle catalog
        property_values = parse_property_single(
            parts_or_species, property_name, property_values, single_host)

    return property_values
#===================================================================================================
# id <-> index conversion
#===================================================================================================
def assign_id_to_index(
    part, species=['all'], id_name='id', id_min=0, store_as_dict=False, print_diagnostic=True):
    '''
    Assign, to particle dictionary, arrays that points from object id to species kind and index in
    species array.
    This is useful for analyses multi-species catalogs with intermixed ids.
    Do not assign pointers for ids below id_min.

    Parameters
    ----------
    part : dict : catalog of particles of various species
    species : str or list : name[s] of species to use: 'all' = use all in particle dictionary
    id_name : str : key name for particle id
    id_min : int : minimum id in catalog
    store_as_dict : bool : whether to store id-to-index pointer as dict instead of array
    print_diagnostic : bool : whether to print diagnostic information
    '''
    Say = ut.io.SayClass(assign_id_to_index)

    # get list of species that have valid id key
    species = parse_species(part, species)
    for spec in species:
        assert id_name in part[spec]

    # get list of all ids
    ids_all = []
    for spec in species:
        ids_all.extend(part[spec][id_name])
    ids_all = np.array(ids_all, dtype=part[spec][id_name].dtype)

    if print_diagnostic:
        # check if duplicate ids within species
        for spec in species:
            masks = (part[spec][id_name] >= id_min)
            total_number = np.sum(masks)
            unique_number = np.unique(part[spec][id_name][masks]).size
            if total_number != unique_number:
                Say.say('species {} has {} ids that are repeated'.format(
                    spec, total_number - unique_number))

        # check if duplicate ids across species
        if len(species) > 1:
            masks = (ids_all >= id_min)
            total_number = np.sum(masks)
            unique_number = np.unique(ids_all[masks]).size
            if total_number != unique_number:
                Say.say('across all species, {} ids are repeated'.format(
                    total_number - unique_number))

        Say.say('maximum id = {}'.format(ids_all.max()))

    part.id_to_index = {}

    if store_as_dict:
        # store pointers as a dictionary
        # store overall dictionary (across all species) and dictionary within each species
        for spec in species:
            part[spec].id_to_index = {}
            for part_i, part_id in enumerate(part[spec][id_name]):

                if part_id in part.id_to_index:
                    # redundant ids - add to existing entry as list
                    if isinstance(part.id_to_index[part_id], tuple):
                        part.id_to_index[part_id] = [part.id_to_index[part_id]]
                    part.id_to_index[part_id].append((spec, part_i))

                    if part_id in part[spec].id_to_index:
                        if np.isscalar(part[spec].id_to_index[part_id]):
                            part[spec].id_to_index[part_id] = [part[spec].id_to_index[part_id]]
                        part[spec].id_to_index[part_id].append(part_i)

                else:
                    # new id - add as new entry
                    part.id_to_index[part_id] = (spec, part_i)
                    part[spec].id_to_index[part_id] = part_i

        # convert lists to arrays
        dtype = part[spec][id_name].dtype
        for part_id in part[spec].id_to_index:
            if isinstance(part[spec].id_to_index[part_id], list):
                part[spec].id_to_index[part_id] = np.array(
                    part[spec].id_to_index[part_id], dtype=dtype)

    else:
        # store pointers as arrays
        # 'species' maps each id to a species-name string; 'index' maps each
        # id to the particle's index within that species' arrays
        part.id_to_index['species'] = np.zeros(ids_all.max() + 1, dtype='|S6')
        dtype = ut.array.parse_data_type(ids_all.max() + 1)
        part.id_to_index['index'] = ut.array.get_array_null(ids_all.max() + 1, dtype=dtype)

        for spec in species:
            # skip ids below id_min
            masks = (part[spec][id_name] >= id_min)
            part.id_to_index['species'][part[spec][id_name][masks]] = spec
            part.id_to_index['index'][part[spec][id_name][masks]] = ut.array.get_arange(
                part[spec][id_name], dtype=dtype)[masks]
#===================================================================================================
# position, velocity
#===================================================================================================
def get_center_positions(
    part, species=['star', 'dark', 'gas'], part_indicess=None, method='center-of-mass',
    center_number=1, exclusion_distance=200, center_positions=None, distance_max=Inf,
    compare_centers=False, return_array=True):
    '''
    Get position[s] of center of mass [kpc comoving] using iterative zoom-in on input species.

    Parameters
    ----------
    part : dict : dictionary of particles
    species : str or list : name[s] of species to use: 'all' = use all in particle dictionary
    part_indicess : array or list of arrays : indices of particle to use to define center
        use this to include only particles that you know are relevant
    method : str : method of centering: 'center-of-mass', 'potential'
    center_number : int : number of centers to compute
    exclusion_distance : float :
        radius around previous center to cut before finding next center [kpc comoving]
    center_positions : array-like : initial center position[s] to use
    distance_max : float : maximum radius to consider initially
    compare_centers : bool : whether to run sanity check to compare centers via zoom v potential
    return_array : bool :
        whether to return single array instead of array of arrays, if center_number = 1

    Returns
    -------
    center_positions : array or array of arrays : position[s] of center[s] [kpc comoving]
    '''
    Say = ut.io.SayClass(get_center_positions)

    assert method in ['center-of-mass', 'potential']
    species = parse_species(part, species)
    part_indicess = parse_property(species, 'indices', part_indicess)

    if center_positions is None or np.ndim(center_positions) == 1:
        # list-ify center_positions: one entry (possibly None) per center to find
        center_positions = [center_positions for _ in range(center_number)]
    if np.shape(center_positions)[0] != center_number:
        raise ValueError('! input center_positions = {} but also input center_number = {}'.format(
            center_positions, center_number))

    if method == 'potential':
        if len(species) > 1:
            Say.say('! using only first species = {} for centering via potential'.format(
                species[0]))
        if 'potential' not in part[species[0]]:
            # cannot center on potential if the catalog does not store it - fall back
            Say.say('! {} does not have potential, using center-of-mass zoom instead'.format(
                species[0]))
            method = 'center-of-mass'

    if method == 'potential':
        # use single (first) species
        spec_i = 0
        spec_name = species[spec_i]
        # bug fix: pass the species catalog (not the species name string) to parse_indices,
        # consistent with every other call site in this module
        part_indices = parse_indices(part[spec_name], part_indicess[spec_i])
        for center_i, center_position in enumerate(center_positions):
            if center_i > 0:
                # cull out particles near previous center
                distances = get_distances_wrt_center(
                    part, spec_name, part_indices, center_positions[center_i - 1],
                    total_distance=True, return_array=True)
                # exclusion distance in [kpc comoving]
                part_indices = part_indices[
                    distances > (exclusion_distance * part.info['scalefactor'])]
            if center_position is not None and distance_max > 0 and distance_max < Inf:
                # impose distance cut around input center
                part_indices = get_indices_within_coordinates(
                    part, spec_name, [0, distance_max], center_position,
                    part_indicess=part_indices, return_array=True)
            # bug fix: nanargmin returns an index into the *subset* selected by part_indices,
            # so map it back to an index into the full particle arrays before using it
            part_index = part_indices[
                np.nanargmin(part[spec_name]['potential'][part_indices])]
            center_positions[center_i] = part[spec_name]['position'][part_index]
    else:
        # center-of-mass zoom: pool positions and masses across all input species
        for spec_i, spec_name in enumerate(species):
            part_indices = parse_indices(part[spec_name], part_indicess[spec_i])
            if spec_i == 0:
                positions = part[spec_name]['position'][part_indices]
                masses = part[spec_name]['mass'][part_indices]
            else:
                positions = np.concatenate(
                    [positions, part[spec_name]['position'][part_indices]])
                masses = np.concatenate([masses, part[spec_name]['mass'][part_indices]])
        for center_i, center_position in enumerate(center_positions):
            if center_i > 0:
                # remove particles near previous center
                distances = ut.coordinate.get_distances(
                    positions, center_positions[center_i - 1], part.info['box.length'],
                    part.snapshot['scalefactor'], total_distance=True)  # [kpc physical]
                masks = (distances > (exclusion_distance * part.info['scalefactor']))
                positions = positions[masks]
                masses = masses[masks]
            center_positions[center_i] = ut.coordinate.get_center_position_zoom(
                positions, masses, part.info['box.length'], center_position=center_position,
                distance_max=distance_max)

    center_positions = np.array(center_positions)

    if compare_centers:
        position_dif_max = 1  # [kpc comoving]
        if 'potential' not in part[species[0]]:
            Say.say('! {} not have potential, cannot compare against zoom center-of-mass'.format(
                species[0]))
            return center_positions
        if method == 'potential':
            method_other = 'center-of-mass'
        else:
            method_other = 'potential'
        # recompute with the other method and warn about centers that disagree
        center_positions_other = get_center_positions(
            part, species, part_indicess, method_other, center_number, exclusion_distance,
            center_positions, distance_max, compare_centers=False, return_array=False)
        position_difs = np.abs(center_positions - center_positions_other)
        for pi, position_dif in enumerate(position_difs):
            if np.max(position_dif) > position_dif_max:
                Say.say('! offset center positions')
                Say.say('center position via {}: '.format(method), end='')
                ut.io.print_array(center_positions[pi], '{:.3f}')
                Say.say('center position via {}: '.format(method_other), end='')
                ut.io.print_array(center_positions_other[pi], '{:.3f}')
                Say.say('position difference: ', end='')
                ut.io.print_array(position_dif, '{:.3f}')

    if return_array and center_number == 1:
        center_positions = center_positions[0]

    return center_positions
def get_center_velocities(
    part, species_name='star', part_indices=None, distance_max=15, center_positions=None,
    return_array=True):
    '''
    Get center-of-mass velocity[s] [km / s] of input species around each center position.

    Parameters
    ----------
    part : dict : dictionary of particles
    species_name : str : name of particle species to use
    part_indices : array : indices of particle to use to define center
        use this to exclude particles that you know are not relevant
    distance_max : float : maximum radius to consider [kpc physical]
    center_positions : array or list of arrays : center position[s] [kpc comoving]
        if None, will use default center position[s] in catalog
    return_array : bool :
        whether to return single array instead of array of arrays, if input single center position

    Returns
    -------
    center_velocities : array or array of arrays : velocity[s] of center of mass [km / s]
    '''
    center_positions = parse_property(part, 'center_position', center_positions, single_host=False)
    part_indices = parse_indices(part[species_name], part_indices)

    # positions are stored comoving, so convert the physical distance cut to comoving
    distance_max /= part.snapshot['scalefactor']

    # hoist the per-particle arrays once, outside the loop over centers
    velocities = part[species_name]['velocity'][part_indices]
    masses = part[species_name]['mass'][part_indices]
    positions = part[species_name]['position'][part_indices]

    center_velocities = np.zeros(center_positions.shape, velocities.dtype)
    for center_i, center_position in enumerate(center_positions):
        center_velocities[center_i] = ut.coordinate.get_center_velocity(
            velocities, masses, positions, center_position, distance_max,
            part.info['box.length'])

    if return_array and len(center_velocities) == 1:
        center_velocities = center_velocities[0]

    return center_velocities
def get_distances_wrt_center(
    part, species=['star'], part_indicess=None, center_position=None, rotation=None,
    coordinate_system='cartesian', total_distance=False, return_array=True):
    '''
    Get distances (scalar or vector) between input particles and center_position (input or stored
    in particle catalog).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to compute
    part_indicess : array or list : indices[s] of particles to compute, one array per input species
    center_position : array : position of center [kpc comoving]
        if None, will use default center position in particle catalog
    rotation : bool or array : whether to rotate particles
        two options:
        (a) if input array of eigen-vectors, will define rotation axes for all species
        (b) if True, will rotate to align with principal axes defined by input species
        None or False = do not rotate
    coordinate_system : str : which coordinates to get distances in:
        'cartesian' (default), 'cylindrical', 'spherical'
    total_distance : bool : whether to compute total/scalar distance
    return_array : bool : whether to return single array instead of dict if input single species

    Returns
    -------
    dist : array (object number x dimension number) or dict thereof : [kpc physical]
        3-D distance vectors aligned with default x,y,z axes OR
        3-D distance vectors aligned with major, medium, minor axis OR
        2-D distance vectors along major axes and along minor axis OR
        1-D scalar distances
        OR dictionary of above for each species
    '''
    assert coordinate_system in ('cartesian', 'cylindrical', 'spherical')
    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)
    part_indicess = parse_property(species, 'indices', part_indicess)

    dist = {}
    for spec_i, spec in enumerate(species):
        part_indices = parse_indices(part[spec], part_indicess[spec_i])
        dist[spec] = ut.coordinate.get_distances(
            part[spec]['position'][part_indices], center_position, part.info['box.length'],
            part.snapshot['scalefactor'], total_distance)  # [kpc physical]
        if not total_distance:
            # bug fix: rotation is documented as "bool or array", but rotation=False previously
            # fell through to len(False) and raised TypeError - treat False the same as None
            if rotation is not None and rotation is not False:
                if rotation is True:
                    # get principal axes stored in particle dictionary
                    if (len(part[spec].host_rotation_tensors) and
                            len(part[spec].host_rotation_tensors[0])):
                        rotation_tensor = part[spec].host_rotation_tensors[0]
                    else:
                        raise ValueError('! cannot find principal_axes_tensor in species dict')
                elif len(rotation):
                    # use input rotation vectors
                    rotation_tensor = rotation
                else:
                    # bug fix: an empty rotation array previously left rotation_tensor
                    # undefined and raised NameError below - fail with a clear message
                    raise ValueError('! cannot parse rotation = {}'.format(rotation))
                dist[spec] = ut.coordinate.get_coordinates_rotated(dist[spec], rotation_tensor)
            if coordinate_system in ['cylindrical', 'spherical']:
                dist[spec] = ut.coordinate.get_positions_in_coordinate_system(
                    dist[spec], 'cartesian', coordinate_system)

    if return_array and len(species) == 1:
        dist = dist[species[0]]

    return dist
def get_velocities_wrt_center(
    part, species=['star'], part_indicess=None, center_velocity=None, center_position=None,
    rotation=False, coordinate_system='cartesian', total_velocity=False, return_array=True):
    '''
    Get velocities (either scalar or vector) between input particles and center_velocity
    (input or stored in particle catalog).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to get
    part_indicess : array or list : indices[s] of particles to select, one array per input species
    center_velocity : array : center velocity [km / s]
        if None, will use default center velocity in catalog
    center_position : array : center position [kpc comoving], to use in computing Hubble flow
        if None, will use default center position in catalog
    rotation : bool or array : whether to rotate particles
        two options:
        (a) if input array of eigen-vectors, will define rotation axes for all species
        (b) if True, will rotate to align with principal axes defined by input species
        None or False (default) = do not rotate
    coordinate_system : str : which coordinates to get positions in:
        'cartesian' (default), 'cylindrical', 'spherical'
    total_velocity : bool : whether to compute total/scalar velocity
    return_array : bool : whether to return array (instead of dict) if input single species

    Returns
    -------
    vel : array or dict thereof :
        velocities (object number x dimension number, or object number) [km / s]
    '''
    assert coordinate_system in ('cartesian', 'cylindrical', 'spherical')
    species = parse_species(part, species)
    center_velocity = parse_property(part, 'center_velocity', center_velocity)
    center_position = parse_property(part, 'center_position', center_position)
    part_indicess = parse_property(species, 'indices', part_indicess)

    # bug fix: the default rotation=False previously passed the `rotation is not None` check
    # and crashed on len(False) (TypeError) whenever total_velocity is False;
    # treat None and False both as "do not rotate"
    use_rotation = rotation is not None and rotation is not False

    vel = {}
    for spec_i, spec in enumerate(species):
        part_indices = parse_indices(part[spec], part_indicess[spec_i])
        vel[spec] = ut.coordinate.get_velocity_differences(
            part[spec]['velocity'][part_indices], center_velocity,
            part[spec]['position'][part_indices], center_position, part.info['box.length'],
            part.snapshot['scalefactor'], part.snapshot['time.hubble'], total_velocity)
        if not total_velocity:
            if use_rotation:
                if rotation is True:
                    # get principal axes stored in particle dictionary
                    if (len(part[spec].host_rotation_tensors) and
                            len(part[spec].host_rotation_tensors[0])):
                        rotation_tensor = part[spec].host_rotation_tensors[0]
                    else:
                        raise ValueError('! cannot find principal_axes_tensor in species dict')
                elif len(rotation):
                    # use input rotation vectors
                    rotation_tensor = rotation
                else:
                    # empty rotation array previously left rotation_tensor undefined (NameError)
                    raise ValueError('! cannot parse rotation = {}'.format(rotation))
                vel[spec] = ut.coordinate.get_coordinates_rotated(vel[spec], rotation_tensor)
            if coordinate_system in ('cylindrical', 'spherical'):
                # need to compute distance vectors to define the local coordinate frame
                distances = ut.coordinate.get_distances(
                    part[spec]['position'][part_indices], center_position,
                    part.info['box.length'], part.snapshot['scalefactor'])  # [kpc physical]
                if use_rotation:
                    # need to rotate distances too, into the same frame as the velocities
                    distances = ut.coordinate.get_coordinates_rotated(distances, rotation_tensor)
                vel[spec] = ut.coordinate.get_velocities_in_coordinate_system(
                    vel[spec], distances, 'cartesian', coordinate_system)

    if return_array and len(species) == 1:
        vel = vel[species[0]]

    return vel
def get_orbit_dictionary(
    part, species=['star'], part_indicess=None, center_position=None, center_velocity=None,
    return_single=True):
    '''
    Get dictionary of orbital parameters, computed from distance and velocity vectors of each
    input species relative to the center position and velocity.
    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to compute
    part_indicess : array or list : indices[s] of particles to select, one array per input species
    center_position : array : center (reference) position
    center_velocity : array : center (reference) velocity
    return_single : bool :
        whether to return single dict instead of dict of dicts, if single species
    Returns
    -------
    orb : dict : dictionary of orbital properties, one per species
        (or a single species' dict, if return_single and input single species)
    '''
    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)
    center_velocity = parse_property(part, 'center_velocity', center_velocity)
    part_indicess = parse_property(species, 'indices', part_indicess)
    orb = {}
    for spec_i, spec in enumerate(species):
        part_indices = parse_indices(part[spec], part_indicess[spec_i])
        # distance vectors wrt center [kpc physical]
        distance_vectors = ut.coordinate.get_distances(
            part[spec]['position'][part_indices], center_position, part.info['box.length'],
            part.snapshot['scalefactor'])
        # velocity vectors wrt center [km / s]; positions + time.hubble are passed so the
        # helper can include the Hubble-flow contribution
        velocity_vectors = ut.coordinate.get_velocity_differences(
            part[spec]['velocity'][part_indices], center_velocity,
            part[spec]['position'][part_indices], center_position,
            part.info['box.length'], part.snapshot['scalefactor'], part.snapshot['time.hubble'])
        orb[spec] = orbit.get_orbit_dictionary(distance_vectors, velocity_vectors)
    if return_single and len(species) == 1:
        orb = orb[species[0]]
    return orb
#===================================================================================================
# subsample
#===================================================================================================
def get_indices_within_coordinates(
    part, species=['star'],
    distance_limitss=[], center_position=None,
    velocity_limitss=[], center_velocity=None,
    rotation=None, coordinate_system='cartesian',
    part_indicess=None, return_array=True):
    '''
    Get indices of particles that are within distance and/or velocity coordinate limits from center
    (either input or stored in particle catalog).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to use
    distance_limitss : list or list of lists :
        min and max distance[s], relative to center, to get particles [kpc physical]
        default is 1-D list, but can be 2-D or 3-D list to select separately along dimensions
        if 2-D or 3-D, need to input *signed* limits
    center_position : array : center position [kpc comoving]
        if None, will use default center position in particle catalog
    velocity_limitss : list or list of lists :
        min and max velocities, relative to center, to get particles [km / s]
        default is 1-D list, but can be 2-D or 3-D list to select separately along dimensions
        if 2-D or 3-D, need to input *signed* limits
    center_velocity : array : center velocity [km / s]
        if None, will use default center velocity in particle catalog
    rotation : bool or array : whether to rotate particle coordinates
        two options:
        (a) if input array of eigen-vectors, will use to define rotation axes for all species
        (b) if True, will rotate to align with principal axes defined by each input species
    coordinate_system : str : which coordinates to get positions in:
        'cartesian' (default), 'cylindrical', 'spherical'
    part_indicess : array : prior indices[s] of particles to select, one array per input species
    return_array : bool : whether to return single array instead of dict, if input single species

    Returns
    -------
    part_index : dict or array : array or dict of arrays of indices of particles in region
    '''
    assert coordinate_system in ['cartesian', 'cylindrical', 'spherical']
    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)
    if velocity_limitss is not None and len(velocity_limitss):
        center_velocity = parse_property(part, 'center_velocity', center_velocity)
    part_indicess = parse_property(species, 'indices', part_indicess)

    part_index = {}
    for spec_i, spec in enumerate(species):
        part_indices = parse_indices(part[spec], part_indicess[spec_i])

        if len(part_indices) and distance_limitss is not None and len(distance_limitss):
            # 1-D input limits = scalar distance cut; 2-D input = signed per-dimension cuts
            distance_limits_dimen = np.ndim(distance_limitss)
            if distance_limits_dimen == 1:
                total_distance = True
            elif distance_limits_dimen == 2:
                total_distance = False
                assert len(distance_limitss) in [2, 3]
            else:
                raise ValueError('! cannot parse distance_limitss = {}'.format(distance_limitss))

            if (distance_limits_dimen == 1 and distance_limitss[0] <= 0 and
                    distance_limitss[1] >= Inf):
                pass  # null case, no actual limits imposed, so skip rest
            else:
                distancess = get_distances_wrt_center(
                    part, spec, part_indices, center_position, rotation, coordinate_system,
                    total_distance)
                # NOTE(review): per-dimension limits index distancess[k] along axis 0 -
                # confirm this matches the layout returned by get_distances_wrt_center
                if distance_limits_dimen == 1:
                    # distances are absolute
                    masks = (
                        (distancess >= np.min(distance_limitss)) *
                        (distancess < np.max(distance_limitss))
                    )
                elif distance_limits_dimen == 2:
                    if len(distance_limitss) == 2:
                        # distances are signed
                        masks = (
                            (distancess[0] >= np.min(distance_limitss[0])) *
                            (distancess[0] < np.max(distance_limitss[0])) *
                            (distancess[1] >= np.min(distance_limitss[1])) *
                            (distancess[1] < np.max(distance_limitss[1]))
                        )
                    elif len(distance_limitss) == 3:
                        # bug fix: this branch previously tested distance_limits_dimen == 3,
                        # which can never hold inside the dimen == 2 branch, so 3-D limits were
                        # silently never applied; also restore the missing '*' between factors,
                        # which otherwise made Python *call* the boolean array (TypeError)
                        masks = (
                            (distancess[0] >= np.min(distance_limitss[0])) *
                            (distancess[0] < np.max(distance_limitss[0])) *
                            (distancess[1] >= np.min(distance_limitss[1])) *
                            (distancess[1] < np.max(distance_limitss[1])) *
                            (distancess[2] >= np.min(distance_limitss[2])) *
                            (distancess[2] < np.max(distance_limitss[2]))
                        )
                part_indices = part_indices[masks]

        if len(part_indices) and velocity_limitss is not None and len(velocity_limitss):
            velocity_limits_dimen = np.ndim(velocity_limitss)
            if velocity_limits_dimen == 1:
                return_total_velocity = True
            elif velocity_limits_dimen == 2:
                return_total_velocity = False
                assert len(velocity_limitss) in [2, 3]
            else:
                raise ValueError('! cannot parse velocity_limitss = {}'.format(velocity_limitss))

            if (velocity_limits_dimen == 1 and velocity_limitss[0] <= 0 and
                    velocity_limitss[1] >= Inf):
                pass  # null case, no actual limits imposed, so skip rest
            else:
                velocitiess = get_velocities_wrt_center(
                    part, spec, part_indices, center_velocity, center_position, rotation,
                    coordinate_system, return_total_velocity)
                if velocity_limits_dimen == 1:
                    # velocities are absolute
                    masks = (
                        (velocitiess >= np.min(velocity_limitss)) *
                        (velocitiess < np.max(velocity_limitss))
                    )
                elif velocity_limits_dimen == 2:
                    if len(velocity_limitss) == 2:
                        # velocities are signed
                        masks = (
                            (velocitiess[0] >= np.min(velocity_limitss[0])) *
                            (velocitiess[0] < np.max(velocity_limitss[0])) *
                            (velocitiess[1] >= np.min(velocity_limitss[1])) *
                            (velocitiess[1] < np.max(velocity_limitss[1]))
                        )
                    elif len(velocity_limitss) == 3:
                        # bug fix: restore the missing '*' between the dimension-1 and
                        # dimension-2 factors - the adjacent parentheses previously *called*
                        # the boolean array, raising TypeError at runtime
                        masks = (
                            (velocitiess[0] >= np.min(velocity_limitss[0])) *
                            (velocitiess[0] < np.max(velocity_limitss[0])) *
                            (velocitiess[1] >= np.min(velocity_limitss[1])) *
                            (velocitiess[1] < np.max(velocity_limitss[1])) *
                            (velocitiess[2] >= np.min(velocity_limitss[2])) *
                            (velocitiess[2] < np.max(velocity_limitss[2]))
                        )
                part_indices = part_indices[masks]

        part_index[spec] = part_indices

    if return_array and len(species) == 1:
        part_index = part_index[species[0]]

    return part_index
def get_indices_id_kind(
    part, species=['star'], id_kind='unique', part_indicess=None, return_array=True):
    '''
    Get indices of particles whose ids are either unique (no other particle of the same species
    has the same id) or multiple (at least one other particle of the same species shares the id).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species
    id_kind : str : id kind of particles to get: 'unique', 'multiple'
    part_indicess : array : prior indices[s] of particles to select, one array per input species
    return_array : bool : whether to return single array instead of dict, if input single species

    Returns
    -------
    part_index : dict or array : array or dict of arrays of indices of particles of given id kind
    '''
    species = parse_species(part, species)
    part_indicess = parse_property(species, 'indices', part_indicess)
    assert id_kind in ['unique', 'multiple']

    part_index = {}
    for spec_index, spec_name in enumerate(species):
        indices = parse_indices(part[spec_name], part_indicess[spec_index])
        # count how many times each id occurs among the selected particles
        _ids, first_iis, id_counts = np.unique(
            part[spec_name]['id'][indices], return_index=True, return_counts=True)
        # particle indices whose id occurs exactly once
        indices_unique = np.sort(indices[first_iis[id_counts == 1]])
        if id_kind == 'unique':
            part_index[spec_name] = indices_unique
        else:
            # 'multiple' (guaranteed by the assert above): everything selected that is not unique
            part_index[spec_name] = np.setdiff1d(indices, indices_unique)

    if return_array and len(species) == 1:
        part_index = part_index[species[0]]

    return part_index
#===================================================================================================
# halo/galaxy major/minor axes
#===================================================================================================
def get_principal_axes(
    part, species_name='star', distance_max=Inf, mass_percent=None, age_percent=None, age_limits=[],
    center_positions=None, center_velocities=None, part_indices=None, return_array=True,
    print_results=True):
    '''
    Get reverse-sorted eigen-vectors, eigen-values, and axis ratios of principal axes of
    each host galaxy/halo.
    Ensure that principal axes are oriented so median v_phi > 0.
    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species_name : str : name of particle species to use
    distance_max : float : maximum distance to select particles [kpc physical]
    mass_percent : float : keep particles within the distance that encloses mass percent [0, 100]
        of all particles within distance_max
    age_percent : float : use the youngest age_percent of particles within distance cut
    age_limits : float : use only particles within age limits
    center_positions : array or array of arrays : position[s] of center[s] [kpc comoving]
    center_velocities : array or array of arrays : velocity[s] of center[s] [km / s]
    part_indices : array : indices[s] of particles to select
    return_array : bool :
        whether to return single array for each property, instead of array of arrays, if single host
    print_results : bool : whether to print axis ratios
    Returns
    -------
    principal_axes = {
        'rotation.tensor': array : rotation vectors that define max, med, min axes
        'eigen.values': array : eigen-values of max, med, min axes
        'axis.ratios': array : ratios of principal axes
    }
    '''
    Say = ut.io.SayClass(get_principal_axes)
    center_positions = parse_property(part, 'center_position', center_positions, single_host=False)
    center_velocities = parse_property(
        part, 'center_velocity', center_velocities, single_host=False)
    part_indices = parse_indices(part[species_name], part_indices)
    # accumulate one entry per center/host, converted to arrays at the end
    principal_axes = {
        'rotation.tensor': [],
        'eigen.values': [],
        'axis.ratios': [],
    }
    for center_i, center_position in enumerate(center_positions):
        distance_vectors = ut.coordinate.get_distances(
            part[species_name]['position'][part_indices], center_position, part.info['box.length'],
            part.snapshot['scalefactor'])  # [kpc physical]
        distances = np.sqrt(np.sum(distance_vectors ** 2, 1))
        # boolean selection of particles within distance_max; refined in place below
        masks = (distances < distance_max)
        if mass_percent:
            # tighten the distance cut to the radius enclosing mass_percent of the mass
            distance_percent = ut.math.percentile_weighted(
                distances[masks], mass_percent,
                part[species_name].prop('mass', part_indices[masks]))
            masks *= (distances < distance_percent)
        if age_percent or (age_limits is not None and len(age_limits)):
            if 'form.scalefactor' not in part[species_name]:
                raise ValueError('! input age constraints but age not in {} catalog'.format(
                    species_name))
            if age_percent and (age_limits is not None and len(age_limits)):
                # age_percent takes precedence when both age selections are input
                Say.say('input both age_percent and age_limits, using only age_percent')
            if age_percent:
                # age cut enclosing the youngest age_percent (mass-weighted) of the selection
                age_max = ut.math.percentile_weighted(
                    part[species_name].prop('age', part_indices[masks]), age_percent,
                    part[species_name].prop('mass', part_indices[masks]))
                age_limits_use = [0, age_max]
            else:
                age_limits_use = age_limits
            Say.say('using {} particles with age = {} Gyr'.format(
                species_name, ut.array.get_limits_string(age_limits_use)))
            masks *= ((part[species_name].prop('age', part_indices) >= min(age_limits_use)) *
                      (part[species_name].prop('age', part_indices) < max(age_limits_use)))
        rotation_tensor, eigen_values, axis_ratios = ut.coordinate.get_principal_axes(
            distance_vectors[masks], part[species_name].prop('mass', part_indices[masks]),
            print_results)
        # test if need to flip a principal axis to ensure that net v_phi > 0
        velocity_vectors = ut.coordinate.get_velocity_differences(
            part[species_name].prop('velocity', part_indices[masks]), center_velocities[center_i])
        velocity_vectors_rot = ut.coordinate.get_coordinates_rotated(
            velocity_vectors, rotation_tensor)
        distance_vectors_rot = ut.coordinate.get_coordinates_rotated(
            distance_vectors[masks], rotation_tensor)
        velocity_vectors_cyl = ut.coordinate.get_velocities_in_coordinate_system(
            velocity_vectors_rot, distance_vectors_rot, 'cartesian', 'cylindrical')
        # assumes column 2 of the cylindrical velocities is v_phi - TODO confirm against
        # ut.coordinate.get_velocities_in_coordinate_system
        if np.median(velocity_vectors_cyl[:, 2]) < 0:
            rotation_tensor[1] *= -1  # flip so net v_phi is positive
        principal_axes['rotation.tensor'].append(rotation_tensor)
        principal_axes['eigen.values'].append(eigen_values)
        principal_axes['axis.ratios'].append(axis_ratios)
    for k in principal_axes:
        principal_axes[k] = np.array(principal_axes[k])
    if return_array and np.shape(center_positions)[0] == 1:
        # single host: unwrap the per-host axis of each property
        for k in principal_axes:
            principal_axes[k] = principal_axes[k][0]
    return principal_axes
#===================================================================================================
# halo/galaxy radius
#===================================================================================================
def get_halo_properties(
    part, species=['dark', 'star', 'gas'], virial_kind='200m',
    distance_limits=[10, 600], distance_bin_width=0.02, distance_scaling='log',
    center_position=None, return_array=True, print_results=True):
    '''
    Compute halo radius according to virial_kind.
    Return this radius, the mass from each species within this radius, and particle indices within
    this radius (if get_part_indices).
    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to use: 'all' = use all in dictionary
    virial_kind : str : virial overdensity definition
        '200m' -> average density is 200 x matter
        '200c' -> average density is 200 x critical
        'vir' -> average density is Bryan & Norman
        'fof.100m' -> edge density is 100 x matter, for FoF(ll=0.168)
        'fof.60m' -> edge density is 60 x matter, for FoF(ll=0.2)
    distance_limits : list : min and max distance to consider [kpc physical]
    distance_bin_width : float : width of distance bin
    distance_scaling : str : scaling of distance: 'log', 'linear'
    center_position : array : center position to use
        if None, will use default center position in catalog
    return_array : bool : whether to return array (instead of dict) if input single species
    print_results : bool : whether to print radius and mass
    Returns
    -------
    halo_prop : dict : dictionary of halo properties:
        radius : float : halo radius [kpc physical]
        mass : float : mass within radius [M_sun]
        vel.circ.max : float : maximum circular velocity [km / s]
        vel.circ.max.radius : float : radius of maximum circular velocity [kpc physical]
        indices : array : indices of particles within radius
    '''
    distance_limits = np.asarray(distance_limits)
    Say = ut.io.SayClass(get_halo_properties)
    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)
    HaloProperty = halo_property.HaloPropertyClass(part.Cosmology, part.snapshot['redshift'])
    DistanceBin = ut.binning.DistanceBinClass(
        distance_scaling, distance_limits, width=distance_bin_width, dimension_number=3)
    # threshold density that defines the halo edge for this virial_kind
    overdensity, reference_density = HaloProperty.get_overdensity(virial_kind, units='kpc physical')
    virial_density = overdensity * reference_density
    mass_cum_in_bins = np.zeros(DistanceBin.number)
    # keep per-species distances so particle indices within the radius can be selected below
    distancess = []
    for spec_i, spec in enumerate(species):
        distances = ut.coordinate.get_distances(
            part[spec]['position'], center_position, part.info['box.length'],
            part.snapshot['scalefactor'], total_distance=True)  # [kpc physical]
        distancess.append(distances)
        mass_in_bins = DistanceBin.get_histogram(distancess[spec_i], False, part[spec]['mass'])
        # get mass within distance minimum, for computing cumulative values
        distance_indices = np.where(distancess[spec_i] < np.min(distance_limits))[0]
        mass_cum_in_bins += (np.sum(part[spec]['mass'][distance_indices]) +
                             np.cumsum(mass_in_bins))
    if part.info['baryonic'] and len(species) == 1 and species[0] == 'dark':
        # correct for baryonic mass if analyzing only dark matter in baryonic simulation
        Say.say('! using only dark particles, so correcting for baryonic mass')
        mass_factor = 1 + part.Cosmology['omega_baryon'] / part.Cosmology['omega_matter']
        mass_cum_in_bins *= mass_factor
    # cumulative densities in bins
    density_cum_in_bins = mass_cum_in_bins / DistanceBin.volumes_cum
    # get smallest radius that satisfies virial density
    for d_bin_i in range(DistanceBin.number - 1):
        if (density_cum_in_bins[d_bin_i] >= virial_density and
                density_cum_in_bins[d_bin_i + 1] < virial_density):
            # found the crossing bin: interpolate in log space
            # (bin order reversed so the x-values passed to np.interp increase)
            log_halo_radius = np.interp(
                np.log10(virial_density), np.log10(density_cum_in_bins[[d_bin_i + 1, d_bin_i]]),
                DistanceBin.log_maxs[[d_bin_i + 1, d_bin_i]])
            halo_radius = 10 ** log_halo_radius
            break
    else:
        # no bin crossed the virial density: diagnose which way the limits should move
        Say.say('! could not determine halo R_{}'.format(virial_kind))
        if density_cum_in_bins[0] < virial_density:
            Say.say('distance min = {:.1f} kpc already is below virial density = {}'.format(
                distance_limits.min(), virial_density))
            Say.say('decrease distance_limits')
        elif density_cum_in_bins[-1] > virial_density:
            Say.say('distance max = {:.1f} kpc still is above virial density = {}'.format(
                distance_limits.max(), virial_density))
            Say.say('increase distance_limits')
        else:
            Say.say('not sure why!')
        # returns None implicitly when no radius can be determined
        return
    # get maximum of V_circ = sqrt(G M(< r) / r)
    vel_circ_in_bins = ut.constant.km_per_kpc * np.sqrt(
        ut.constant.grav_kpc_msun_sec * mass_cum_in_bins / DistanceBin.maxs)
    vel_circ_max = np.max(vel_circ_in_bins)
    vel_circ_max_radius = DistanceBin.maxs[np.argmax(vel_circ_in_bins)]
    # sum mass and collect particle indices within the halo radius, per species
    halo_mass = 0
    part_indices = {}
    for spec_i, spec in enumerate(species):
        masks = (distancess[spec_i] < halo_radius)
        halo_mass += np.sum(part[spec]['mass'][masks])
        part_indices[spec] = ut.array.get_arange(part[spec]['mass'])[masks]
    if print_results:
        Say.say(
            'R_{} = {:.1f} kpc\n  M_{} = {} M_sun, log = {}\n  V_max = {:.1f} km/s'.format(
                virial_kind, halo_radius, virial_kind,
                ut.io.get_string_from_numbers(halo_mass, 2),
                ut.io.get_string_from_numbers(np.log10(halo_mass), 2),
                vel_circ_max)
        )
    halo_prop = {}
    halo_prop['radius'] = halo_radius
    halo_prop['mass'] = halo_mass
    halo_prop['vel.circ.max'] = vel_circ_max
    halo_prop['vel.circ.max.radius'] = vel_circ_max_radius
    if return_array and len(species) == 1:
        part_indices = part_indices[species[0]]
    halo_prop['indices'] = part_indices
    return halo_prop
def get_galaxy_properties(
    part, species_name='star', edge_kind='mass.percent', edge_value=90,
    distance_max=20, distance_bin_width=0.02, distance_scaling='log', center_position=None,
    axis_kind='', rotation_tensor=None, rotation_distance_max=20,
    other_axis_distance_limits=None, part_indices=None, print_results=True):
    '''
    Compute galaxy radius according to edge_kind.

    Return this radius, the mass from species within this radius, particle indices within this
    radius, and rotation vectors (if applicable).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species_name : str : name of particle species to use
    edge_kind : str : method to define galaxy radius
        'mass.percent' = radius at which edge_value (percent) of stellar mass within distance_max
        'density' = radius at which density is edge_value [log(M_sun / kpc^3)]
    edge_value : float : value to use to define galaxy radius
        (a mass percentage if edge_kind is 'mass.percent', a log density if 'density')
    distance_max : float : maximum distance to consider [kpc physical]
    distance_bin_width : float : width of distance bin
    distance_scaling : str : distance bin scaling: 'log', 'linear'
    axis_kind : str : 'major', 'minor', 'both'
    rotation_tensor : array : rotation vectors that define principal axes
    rotation_distance_max : float :
        maximum distance to use in defining rotation vectors of principal axes [kpc physical]
    other_axis_distance_limits : float :
        min and max distances along other axis[s] to keep particles [kpc physical]
    center_position : array : center position [kpc comoving]
        if None, will use default center position in catalog
    part_indices : array : star particle indices (if already know which ones are close)
    print_results : bool : whether to print radius and mass of galaxy

    Returns
    -------
    gal_prop : dict : dictionary of galaxy properties:
        radius or radius.major & radius.minor : float : galaxy radius[s] [kpc physical]
        mass : float : mass within radius[s] [M_sun]
        indices : array : indices of partices within radius[s]
        rotation.tensor : array : eigen-vectors that defined rotation (only if axis_kind='both')
    '''

    def get_radius_mass_indices(
        masses, distances, distance_scaling, distance_limits, distance_bin_width, dimension_number,
        edge_kind, edge_value):
        '''
        Utility function: find the radius at which the mass/density criterion is met,
        plus the total mass and the particle indices inside that radius.

        Returns (radius, mass, indices), or None if no radius satisfies the criterion
        (callers that unpack the result will then raise; TODO consider raising here instead).
        '''
        Say = ut.io.SayClass(get_radius_mass_indices)

        DistanceBin = ut.binning.DistanceBinClass(
            distance_scaling, distance_limits, width=distance_bin_width,
            dimension_number=dimension_number)

        # get masses in distance bins
        mass_in_bins = DistanceBin.get_histogram(distances, False, masses)

        if edge_kind == 'mass.percent':
            # get mass within distance minimum, for computing cumulative values
            d_indices = np.where(distances < np.min(distance_limits))[0]
            log_masses_cum = ut.math.get_log(np.sum(masses[d_indices]) + np.cumsum(mass_in_bins))

            # target log mass = edge_value percent of the total mass inside distance_max
            log_mass = np.log10(edge_value / 100) + log_masses_cum.max()

            try:
                # interpolate in log space
                log_radius = np.interp(log_mass, log_masses_cum, DistanceBin.log_maxs)
            except ValueError:
                Say.say('! could not find object radius - increase distance_max')
                return

        elif edge_kind == 'density':
            log_density_in_bins = ut.math.get_log(mass_in_bins / DistanceBin.volumes)
            # use only bins with defined density (has particles)
            d_bin_indices = np.arange(DistanceBin.number)[np.isfinite(log_density_in_bins)]
            # get smallest radius that satisfies density threshold
            # NOTE(review): d_bin_indices[d_bin_ii + 1] raises IndexError when the
            # threshold is only crossed at the very last populated bin - confirm intended
            for d_bin_ii, d_bin_i in enumerate(d_bin_indices):
                d_bin_i_plus_1 = d_bin_indices[d_bin_ii + 1]
                if (log_density_in_bins[d_bin_i] >= edge_value and
                        log_density_in_bins[d_bin_i_plus_1] < edge_value):
                    # interpolate in log space
                    log_radius = np.interp(
                        edge_value, log_density_in_bins[[d_bin_i_plus_1, d_bin_i]],
                        DistanceBin.log_maxs[[d_bin_i_plus_1, d_bin_i]])
                    break
            else:
                # for-else: loop finished without break, so no bin pair straddles edge_value
                Say.say('! could not find object radius - increase distance_max')
                return

        radius = 10 ** log_radius

        masks = (distances < radius)
        mass = np.sum(masses[masks])
        indices = ut.array.get_arange(masses)[masks]

        return radius, mass, indices

    # start function
    Say = ut.io.SayClass(get_galaxy_properties)

    distance_min = 0.001  # [kpc physical]
    distance_limits = [distance_min, distance_max]

    if edge_kind == 'mass.percent':
        # dealing with cumulative value - stable enough to decrease bin with
        distance_bin_width *= 0.1

    center_position = parse_property(part, 'center_position', center_position)

    if part_indices is None or not len(part_indices):
        # default to all particles of this species
        part_indices = ut.array.get_arange(part[species_name]['position'].shape[0])

    distance_vectors = ut.coordinate.get_distances(
        part[species_name]['position'][part_indices], center_position,
        part.info['box.length'], part.snapshot['scalefactor'])  # [kpc physical]
    distances = np.sqrt(np.sum(distance_vectors ** 2, 1))  # 3-D distance

    masses = part[species_name].prop('mass', part_indices)

    if axis_kind:
        # radius along 2-D major axes (projected radius) or along 1-D minor axis (height)
        assert axis_kind in ['major', 'minor', 'both']

        if rotation_tensor is None or not len(rotation_tensor):
            if (len(part[species_name].host_rotation_tensors) and
                    len(part[species_name].host_rotation_tensors[0])):
                # use only the primary host
                rotation_tensor = part[species_name].host_rotation_tensors[0]
            else:
                # derive principal axes from particles within rotation_distance_max
                masks = (distances < rotation_distance_max)
                rotation_tensor = ut.coordinate.get_principal_axes(
                    distance_vectors[masks], masses[masks])[0]

        distance_vectors = ut.coordinate.get_coordinates_rotated(
            distance_vectors, rotation_tensor=rotation_tensor)

        distances_cyl = ut.coordinate.get_positions_in_coordinate_system(
            distance_vectors, 'cartesian', 'cylindrical')
        # column 0 = R (along major axes), column 1 = Z (along minor axis)
        major_distances, minor_distances = distances_cyl[:, 0], distances_cyl[:, 1]
        minor_distances = np.abs(minor_distances)  # need only absolute distances

        if axis_kind in ['major', 'minor']:
            if axis_kind == 'minor':
                dimension_number = 1
                distances = minor_distances
                other_distances = major_distances
            elif axis_kind == 'major':
                dimension_number = 2
                distances = major_distances
                other_distances = minor_distances

            # optionally keep only particles within a slab/annulus along the other axis
            # NOTE(review): Inf presumably imported from numpy at file top - confirm
            if (other_axis_distance_limits is not None and
                    (min(other_axis_distance_limits) > 0 or
                     max(other_axis_distance_limits) < Inf)):
                masks = ((other_distances >= min(other_axis_distance_limits)) *
                         (other_distances < max(other_axis_distance_limits)))
                distances = distances[masks]
                masses = masses[masks]
    else:
        # spherical average
        dimension_number = 3

    gal_prop = {}

    if axis_kind == 'both':
        # first get 3-D radius as the starting guess for the major-axis radius
        galaxy_radius_3d, _galaxy_mass_3d, indices = get_radius_mass_indices(
            masses, distances, distance_scaling, distance_limits, distance_bin_width, 3,
            edge_kind, edge_value)

        galaxy_radius_major = galaxy_radius_3d
        axes_mass_dif = 1

        # then iterate to get both major and minor axes, until the masses enclosed by
        # the two definitions agree to 0.5 percent
        while axes_mass_dif > 0.005:
            # get 1-D radius along minor axis
            masks = (major_distances < galaxy_radius_major)
            galaxy_radius_minor, galaxy_mass_minor, indices = get_radius_mass_indices(
                masses[masks], minor_distances[masks], distance_scaling, distance_limits,
                distance_bin_width, 1, edge_kind, edge_value)

            # get 2-D radius along major axes
            masks = (minor_distances < galaxy_radius_minor)
            galaxy_radius_major, galaxy_mass_major, indices = get_radius_mass_indices(
                masses[masks], major_distances[masks], distance_scaling, distance_limits,
                distance_bin_width, 2, edge_kind, edge_value)

            axes_mass_dif = (abs(galaxy_mass_major - galaxy_mass_minor) /
                             (0.5 * (galaxy_mass_major + galaxy_mass_minor)))

        # boolean mask of particles inside the final cylinder (R and |Z| cuts)
        indices = (major_distances < galaxy_radius_major) * (minor_distances < galaxy_radius_minor)

        gal_prop['radius.major'] = galaxy_radius_major
        gal_prop['radius.minor'] = galaxy_radius_minor
        gal_prop['mass'] = galaxy_mass_major
        gal_prop['log mass'] = np.log10(galaxy_mass_major)
        gal_prop['rotation.tensor'] = rotation_tensor
        gal_prop['indices'] = part_indices[indices]

        if print_results:
            Say.say('R_{:.0f} along major, minor axes = {:.2f}, {:.2f} kpc physical'.format(
                edge_value, galaxy_radius_major, galaxy_radius_minor))
    else:
        galaxy_radius, galaxy_mass, indices = get_radius_mass_indices(
            masses, distances, distance_scaling, distance_limits, distance_bin_width,
            dimension_number, edge_kind, edge_value)

        gal_prop['radius'] = galaxy_radius
        gal_prop['mass'] = galaxy_mass
        gal_prop['log mass'] = np.log10(galaxy_mass)
        gal_prop['indices'] = part_indices[indices]

        if print_results:
            Say.say('R_{:.0f} = {:.2f} kpc physical'.format(edge_value, galaxy_radius))

    if print_results:
        Say.say('M_star = {:.2e} M_sun, log = {:.2f}'.format(
            gal_prop['mass'], gal_prop['log mass']))

    return gal_prop
#===================================================================================================
# profiles of properties
#===================================================================================================
class SpeciesProfileClass(ut.binning.DistanceBinClass):
'''
Get profiles of either histogram/sum or stastitics (such as average, median) of given
property for given particle species.
__init__ is defined via ut.binning.DistanceBinClass
'''
def get_profiles(
self, part, species=['all'],
property_name='', property_statistic='sum', weight_by_mass=False,
center_position=None, center_velocity=None, rotation=None,
other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Parse inputs into either get_sum_profiles() or get_statistics_profiles().
If know what you want, can skip this and jump to those functions.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
property_statistic : str : statistic to get profile of:
'sum', 'sum.cum', 'density', 'density.cum', 'vel.circ'
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if ('sum' in property_statistic or 'vel.circ' in property_statistic or
'density' in property_statistic):
pros = self.get_sum_profiles(
part, species, property_name, center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
else:
pros = self.get_statistics_profiles(
part, species, property_name, weight_by_mass, center_position, center_velocity,
rotation, other_axis_distance_limits, property_select, part_indicess)
for k in pros:
if '.cum' in property_statistic or 'vel.circ' in property_statistic:
pros[k]['distance'] = pros[k]['distance.cum']
pros[k]['log distance'] = pros[k]['log distance.cum']
else:
pros[k]['distance'] = pros[k]['distance.mid']
pros[k]['log distance'] = pros[k]['log distance.mid']
return pros
def get_sum_profiles(
self, part, species=['all'], property_name='mass', center_position=None,
rotation=None, other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Get profiles of summed quantity (such as mass or density) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : property to get sum of
center_position : list : center position
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if 'gas' in species and 'consume.time' in property_name:
pros_mass = self.get_sum_profiles(
part, species, 'mass', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros_sfr = self.get_sum_profiles(
part, species, 'sfr', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros = pros_sfr
for k in pros_sfr['gas']:
if 'distance' not in k:
pros['gas'][k] = pros_mass['gas'][k] / pros_sfr['gas'][k] / 1e9
return pros
pros = {}
Fraction = ut.math.FractionClass()
if np.isscalar(species):
species = [species]
if species == ['baryon']:
# treat this case specially for baryon fraction
species = ['gas', 'star', 'dark', 'dark2']
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
assert 0 < self.dimension_number <= 3
for spec_i, spec in enumerate(species):
part_indices = part_indicess[spec_i]
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[spec].prop(property_name))
if property_select:
part_indices = catalog.get_indices_catalog(
part[spec], property_select, part_indices)
prop_values = part[spec].prop(property_name, part_indices)
if self.dimension_number == 3:
# simple case: profile using scalar distance
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
elif self.dimension_number in [1, 2]:
# other cases: profile along R (2 major axes) or Z (minor axis)
if rotation is not None and not isinstance(rotation, bool) and len(rotation):
rotation_tensor = rotation
elif (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('want 2-D or 1-D profile but no means to define rotation')
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation_tensor,
coordinate_system='cylindrical')
# ensure all distances are positive definite
distancess = np.abs(distancess)
if self.dimension_number == 1:
# compute profile along minor axis (Z)
distances = distancess[:, 1]
other_distances = distancess[:, 0]
elif self.dimension_number == 2:
# compute profile along major axes (R)
distances = distancess[:, 0]
other_distances = distancess[:, 1]
if (other_axis_distance_limits is not None and
(min(other_axis_distance_limits) > 0 or
max(other_axis_distance_limits) < Inf)):
masks = ((other_distances >= min(other_axis_distance_limits)) *
(other_distances < max(other_axis_distance_limits)))
distances = distances[masks]
prop_values = prop_values[masks]
pros[spec] = self.get_sum_profile(distances, prop_values) # defined in DistanceBinClass
props = [pro_prop for pro_prop in pros[species[0]] if 'distance' not in pro_prop]
props_dist = [pro_prop for pro_prop in pros[species[0]] if 'distance' in pro_prop]
if property_name == 'mass':
# create dictionary for baryonic mass
if 'star' in species or 'gas' in species:
spec_new = 'baryon'
pros[spec_new] = {}
for spec in np.intersect1d(species, ['star', 'gas']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = | np.array(pros[spec][pro_prop]) | numpy.array |
#!/usr/local/bin/python
import argparse
import os
import sys
import pandas as pd
import numpy as np
import time
# silence pandas chained-assignment warnings triggered by in-place edits downstream
pd.options.mode.chained_assignment = None

# command-line interface: three required file paths plus an optional threshold
parser = argparse.ArgumentParser(prog='snvScore')
parser.add_argument('SampleBED', type=str,
                    help='Path to the mosdepth per-base BED output')
parser.add_argument('SNVGermlineTXT', type=str,
                    help='Path to Clivar-generated table with pathogenic germline SNVs')
parser.add_argument('SNVSomaticTXT', type=str,
                    help='Path to Clivar-generated table with pathogenic somatic SNVs')
parser.add_argument('Threshold', type=int, nargs='?',
                    help='SNV coverage quality threshold (optional, positive)', default=0)
args = parser.parse_args()

# keep only the file name (text after the last '/') as the sample label
sample_name = args.SampleBED.rsplit('/', 1)[-1]
def snv_coverage(snv, chrom_cover):
    """Annotate each SNV with the per-base coverage at its position.

    Parameters
    ----------
    snv : pd.DataFrame with a 'position' column, sorted by position
        (rows with NaN or duplicates are dropped first)
    chrom_cover : pd.DataFrame with 'start', 'end', 'coverage' columns describing
        non-overlapping, half-open [start, end) coverage intervals for one chromosome

    Returns
    -------
    pd.DataFrame : copy of snv with a 'coverage' column added
        (0.0 for positions not covered by any interval)
    """
    # dropna/drop_duplicates return new frames, so later assignments do not
    # touch the caller's frame (and avoid chained-assignment pitfalls)
    snv = snv.dropna().drop_duplicates().reset_index(drop=True)
    snv['coverage'] = 0.0
    # restrict to intervals overlapping the SNV position range (snv sorted by position)
    cover_reg = chrom_cover[(chrom_cover.end > snv.position.iloc[0]) &
                            (chrom_cover.start <= snv.position.iloc[-1])]
    cover_reg = cover_reg.reset_index(drop=True)
    for ind in snv.index:
        buf = cover_reg[(cover_reg.end > snv.position[ind]) &
                        (cover_reg.start <= snv.position[ind])]
        if len(buf):
            # use .loc (not chained indexing) and take the scalar explicitly;
            # intervals are non-overlapping so at most one row matches
            snv.loc[ind, 'coverage'] = float(buf.coverage.iloc[0])
    return snv
def CatchChromoRegs(BED_fname, chrom_names):
    """Scan a mosdepth per-base BED file and locate each chromosome's first line.

    Parameters
    ----------
    BED_fname : str : path to a BED file whose rows are grouped by chromosome,
        in the same order as chrom_names (last name expected to be 'chrM')
    chrom_names : list of str : chromosome labels to locate

    Returns
    -------
    np.ndarray (len(chrom_names) + 1, int32) : 0-based line index where each
        chromosome starts; the final entry is the total line count, so
        result[i+1] - result[i] is the number of rows for chromosome i.

    Raises
    ------
    ValueError : if a requested chromosome is absent (the original code would
        spin forever re-reading EOF in that case).
    """
    chrom_start_pos = np.zeros(len(chrom_names) + 1, dtype='int32')
    line_num = 0
    # 'with' guarantees the file is closed (the original leaked the handle)
    with open(BED_fname, 'rt') as BED:
        for i, chrom in enumerate(chrom_names):
            while True:
                line = BED.readline()
                if not line:
                    raise ValueError(
                        'chromosome {} not found in {}'.format(chrom, BED_fname))
                label = line[:line.find('\t')]
                line_num += 1
                if label == chrom:
                    # record index of the line just read (0-based)
                    chrom_start_pos[i] = line_num - 1
                    break
        # consume the remaining chrM rows so the final entry is the total line count
        while label == 'chrM':
            line = BED.readline()
            label = line[:line.find('\t')]
            line_num += 1
        chrom_start_pos[-1] = line_num - 1
    return chrom_start_pos
def ExecuteClinicalCoverageDepthCalc(chrom_names,SNVG,SNVS,SampleBED):
snv_cov = pd.DataFrame(columns=['chr','position','coverage','type'])
all_cov = np.array([])
# start = time.time()
res = CatchChromoRegs(SampleBED,chrom_names)
rows = ['' for i in range(24)]
for chrom,chr_num in zip(chrom_names[:-1],np.arange(24)):
# for chrom,chr_num in zip(chrom_names[:3],np.arange(3)):
chrom_cover = pd.read_csv(SampleBED,delimiter='\t',header=None,names=['chr','start','end','coverage'],skiprows=res[chr_num],nrows=res[chr_num+1]-res[chr_num])
all_cov = np.append(all_cov,chrom_cover.coverage.values,axis=0)
snvg_part = SNVG[SNVG.chr==chrom]
snvs_part = SNVS[SNVS.chr==chrom]
if snvg_part.size>0:
snvg_part = snv_coverage(snvg_part,chrom_cover)
snvg_part['type'] = 'germline'
snv_cov=pd.concat([snv_cov,snvg_part])
germ_row = '%8.0f %10.0f %6.0f %6.0f %6.0f '%(len(snvg_part),
np.median(snvg_part.coverage),
np.std(snvg_part.coverage),
np.min(snvg_part.coverage),
np.max(snvg_part.coverage))
else:
germ_row = '%8.0f %10.0f %6.0f %6.0f %6.0f '%(0,0,0,0,0)
if snvs_part.size>0:
snvs_part=snv_coverage(snvs_part,chrom_cover)
snvs_part['type'] = 'somatic'
snv_cov=pd.concat([snv_cov,snvs_part])
soma_row = '%8.0f %10.0f %6.0f %6.0f %6.0f'%(len(snvs_part),
np.median(snvs_part.coverage),
np.std(snvs_part.coverage),
| np.min(snvs_part.coverage) | numpy.min |
import math
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), numerically stable for any float x.

    The naive form overflows in math.exp(-x) once x < ~-709; branching on the
    sign keeps the exponent non-positive so exp() can only underflow to 0.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)  # x < 0 here, so exp(x) <= 1 and cannot overflow
    return z / (1 + z)
# Function to know if we have a CCW turn
def CCW(p1, p2, p3):
    """Return True when the turn p1 -> p2 -> p3 is counter-clockwise
    (collinear points also return True, matching the >= comparison)."""
    lhs = (p3[1] - p1[1]) * (p2[0] - p1[0])
    rhs = (p2[1] - p1[1]) * (p3[0] - p1[0])
    return lhs >= rhs
# Main function:
def create_convex_hull(S):
    """Compute the convex hull of a 2-D point set via gift wrapping (Jarvis march).

    Parameters
    ----------
    S : np.ndarray of shape (n, 2) : input points
        (assumes at least 3 points, not all collinear -- TODO confirm callers guarantee this)

    Returns
    -------
    np.ndarray : hull vertices in traversal order, starting from the leftmost point
    """
    n = len(S)
    P = [None] * n
    # the point with the smallest x coordinate is guaranteed to lie on the hull
    l = np.where(S[:,0] == np.min(S[:,0]))
    pointOnHull = S[l[0][0]]
    i = 0
    while True:
        P[i] = pointOnHull
        endpoint = S[0]
        for j in range(1,n):
            # adopt S[j] as the new wrap candidate when the current candidate equals
            # the hull point, or when S[j] lies clockwise of edge P[i] -> endpoint
            if (endpoint[0] == pointOnHull[0] and endpoint[1] == pointOnHull[1]) or not CCW(S[j],P[i],endpoint):
                endpoint = S[j]
        i = i + 1
        pointOnHull = endpoint
        # wrapped back to the starting vertex: hull is closed
        if endpoint[0] == P[0][0] and endpoint[1] == P[0][1]:
            break
    # trim the unused (None) tail of the pre-allocated vertex list
    for i in range(n):
        if P[-1] is None:
            del P[-1]
    return np.array(P)
def euclidean_dist(pos1, pos2):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = pos1[0] - pos2[0]
    dy = pos1[1] - pos2[1]
    return math.sqrt(dx * dx + dy * dy)
def compute_centroid(vertices):
    """
    Centroid of a simple polygon via the shoelace formula.

    input:
        vertices: list of (x, y) vertices ordered clockwise or counter-clockwise
    output:
        (c_x, c_y) centroid relative to the polygon's local origin
    """
    acc_x = 0
    acc_y = 0
    twice_area = 0
    count = len(vertices)
    for idx in range(count):
        v0 = vertices[idx]
        v1 = vertices[(idx + 1) % count]
        cross = v0[0] * v1[1] - v0[1] * v1[0]
        acc_x += (v0[0] + v1[0]) * cross
        acc_y += (v0[1] + v1[1]) * cross
        twice_area += cross
    signed_area = twice_area / 2
    # standard normalization; signs cancel so orientation does not matter
    return acc_x / (6 * signed_area), acc_y / (6 * signed_area)
def compute_area(vertices):
    """
    Absolute area of a simple polygon via the shoelace formula.

    input:
        vertices: list of (x, y) vertices ordered clockwise or counter-clockwise
    output:
        non-negative polygon area

    The original also accumulated centroid sums (c_x, c_y) that were never
    used; that dead code is removed here.
    """
    twice_area = 0
    n = len(vertices)
    for i in range(n):
        v0 = vertices[i]
        v1 = vertices[(i + 1) % n]
        twice_area += v0[0] * v1[1] - v0[1] * v1[0]
    return abs(twice_area / 2)
def normalize(vector):
    """
    Scale a 2-D force vector to (approximately) unit length.

    input:
        vector: (x, y) force vector
    output:
        (x, y) with magnitude ~1 (a 1e-6 additive term keeps the
        division safe for the zero vector)
    """
    x, y = vector[0], vector[1]
    mag = math.sqrt(x * x + y * y) + 1e-6
    return x / mag, y / mag
def normalize_vector(x, eps=1e-9):
    """Z-score normalize an array: subtract its mean, divide by its std
    (eps guards against division by zero for constant input)."""
    centered = x - np.mean(x)
    return centered / (np.std(x) + eps)
def side_of_point_on_line(start_pt, end_pt, query_pt):
    """Which side of the directed line start_pt -> end_pt the query point lies on:
    1 for left, -1 for right, 0 for exactly on the line."""
    det = ((end_pt[0] - start_pt[0]) * (query_pt[1] - start_pt[1])
           - (end_pt[1] - start_pt[1]) * (query_pt[0] - start_pt[0]))
    if det == 0:
        return 0
    return 1 if det > 0 else -1
def pointToLineDistance(e1, e2, p1):
    """Perpendicular distance from point p1 to the infinite line through e1 and e2."""
    dy = e2[1] - e1[1]
    dx = e2[0] - e1[0]
    numerator = np.abs(dy * p1[0] - dx * p1[1] + e2[0] * e1[1] - e1[0] * e2[1])
    return numerator / np.sqrt(dy ** 2 + dx ** 2)
def scalarProject(start_pt, end_pt, point):
    """Scalar projection of (point - start_pt) onto the unit direction
    from start_pt toward end_pt (uses the module's normalize())."""
    rel = np.array(point) - np.array(start_pt)
    direction = normalize(np.array(end_pt) - np.array(start_pt))
    return rel[0] * direction[0] + rel[1] * direction[1]
def projectedPtToStartDistance(e1, e2, p1):
    """Distance from e1 to the foot of p1's perpendicular on line e1-e2,
    computed via Pythagoras; returns None when the perpendicular distance
    exceeds the direct distance (numerical degenerate case)."""
    perp = pointToLineDistance(e1, e2, p1)
    hyp = euclidean_dist(e1, p1)
    if abs(perp) > abs(hyp):
        return None
    return math.sqrt(hyp ** 2 - perp ** 2)
def two_line_intersect(e1, e2, e3, e4):
    """Intersect the infinite line through e1-e2 with the segment e3-e4.

    Returns the intersection point as an (x, y) tuple, or None when the lines
    are parallel (zero determinant) or the crossing falls outside segment e3-e4.
    """
    denom = (e1[0] - e2[0]) * (e3[1] - e4[1]) - (e1[1] - e2[1]) * (e3[0] - e4[0])
    if denom == 0:
        return None
    cross12 = e1[0] * e2[1] - e1[1] * e2[0]
    cross34 = e3[0] * e4[1] - e3[1] * e4[0]
    # 1e-6 fudge kept from the original formulation
    px = (cross12 * (e3[0] - e4[0]) - cross34 * (e1[0] - e2[0])) / (denom + 1e-6)
    py = (cross12 * (e3[1] - e4[1]) - cross34 * (e1[1] - e2[1])) / (denom + 1e-6)
    pt = (px, py)
    # reject points whose projection lies outside [e3, e4]
    seg = np.array(e4) - np.array(e3)
    kap = np.dot(np.array(pt) - np.array(e3), seg)
    kab = np.dot(seg, seg)
    if kap > kab or kap < 0:
        return None
    return pt
def find_max_contact_range(vertices, e1, e2):
    """Estimate the polygon's extent perpendicular to the line e1-e2.

    Finds the polygon vertex farthest from line e1-e2, shoots a line through it
    perpendicular to e1-e2, and measures the chord it cuts through the polygon.

    Returns (max_contact_range, intersection points). NOTE(review): if no vertex
    lies off the line (start_pt stays None), intersect_list is never bound and the
    final return raises NameError -- confirm callers always pass non-degenerate input.
    """
    # direction perpendicular to e1-e2; 1e-6 guards the division for horizontal p
    p = np.array(e1) - np.array(e2)
    vector = (1, -(p[0] / (p[1] + 1e-6)))
    # print(vector)
    max_contact_range = 0
    start_pt = None
    max_dist = 0
    # vertex farthest from the reference line anchors the perpendicular chord
    for i in range(len(vertices)):
        dist = pointToLineDistance(e1, e2, vertices[i])
        if dist > max_dist:
            max_dist = dist
            start_pt = vertices[i]
    # print(start_pt)
    if not start_pt is None:
        # extend the perpendicular through the anchor vertex in both directions
        end_pt = np.array(start_pt) + np.array(vector)
        start_pt = np.array(start_pt) - np.array(vector)
        intersect_list = set()
        # collect distinct intersections of that line with every polygon edge,
        # merging points closer than 0.01 to suppress duplicate corner hits
        for j in range(len(vertices)):
            intersect = two_line_intersect(start_pt, end_pt, vertices[j], vertices[(j + 1) % len(vertices)])
            # print(vertices[j], vertices[(j + 1) % len(vertices)])
            # print(intersect)
            if not intersect is None:
                add = True
                for pt in intersect_list:
                    if euclidean_dist(pt, intersect) < 0.01:
                        add = False
                if add:
                    intersect_list.add(intersect)
        # print(intersect_list)
        # exactly two crossings means the chord fully traverses the polygon
        if len(intersect_list) == 2:
            # print(intersect_list)
            intersect_list = list(intersect_list)
            contact_range = euclidean_dist(intersect_list[0], intersect_list[1])
            if contact_range > max_contact_range:
                max_contact_range = contact_range
    # earlier experiment: chord through every vertex (kept for reference)
    # for i in range(len(vertices)):
    #     perp_end_pt = np.array(vertices[i]) + np.array(vector)
    #     intersect_list = []
    #     for j in range(len(vertices)):
    #         intersect = two_line_intersect(vertices[i], perp_end_pt, vertices[j], vertices[(j + 1) % len(vertices)])
    #         if not intersect is None:
    #             intersect_list.append(intersect)
    #     print(intersect_list)
    #     if len(intersect_list) == 2:
    #         # print(intersect_list)
    #         contact_range = euclidean_dist(intersect_list[0], intersect_list[1])
    #         if contact_range > max_contact_range:
    #             max_contact_range = contact_range
    return max_contact_range, list(intersect_list)
def find_collision_dist_convex_hull(start_pt, vector, centroid, vertices):
abs_vertices = np.array(vertices) + np.array(centroid)
end_pt = | np.array(start_pt) | numpy.array |
"""
bitmap utils and much of the ctc code modified
From <NAME>, Rakesh and <NAME>
"""
# Author: <NAME>
# License: BSD 3-clause
from theano import tensor
from scipy import linalg
import theano
import numpy as np
import matplotlib.pyplot as plt
eps = 1E-12
characters = np.array([
0x0,
0x808080800080000,
0x2828000000000000,
0x287C287C280000,
0x81E281C0A3C0800,
0x6094681629060000,
0x1C20201926190000,
0x808000000000000,
0x810202010080000,
0x1008040408100000,
0x2A1C3E1C2A000000,
0x8083E08080000,
0x81000,
0x3C00000000,
0x80000,
0x204081020400000,
0x1824424224180000,
0x8180808081C0000,
0x3C420418207E0000,
0x3C420418423C0000,
0x81828487C080000,
0x7E407C02423C0000,
0x3C407C42423C0000,
0x7E04081020400000,
0x3C423C42423C0000,
0x3C42423E023C0000,
0x80000080000,
0x80000081000,
0x6186018060000,
0x7E007E000000,
0x60180618600000,
0x3844041800100000,
0x3C449C945C201C,
0x1818243C42420000,
0x7844784444780000,
0x3844808044380000,
0x7844444444780000,
0x7C407840407C0000,
0x7C40784040400000,
0x3844809C44380000,
0x42427E4242420000,
0x3E080808083E0000,
0x1C04040444380000,
0x4448507048440000,
0x40404040407E0000,
0x4163554941410000,
0x4262524A46420000,
0x1C222222221C0000,
0x7844784040400000,
0x1C222222221C0200,
0x7844785048440000,
0x1C22100C221C0000,
0x7F08080808080000,
0x42424242423C0000,
0x8142422424180000,
0x4141495563410000,
0x4224181824420000,
0x4122140808080000,
0x7E040810207E0000,
0x3820202020380000,
0x4020100804020000,
0x3808080808380000,
0x1028000000000000,
0x7E0000,
0x1008000000000000,
0x3C023E463A0000,
0x40407C42625C0000,
0x1C20201C0000,
0x2023E42463A0000,
0x3C427E403C0000,
0x18103810100000,
0x344C44340438,
0x2020382424240000,
0x800080808080000,
0x800180808080870,
0x20202428302C0000,
0x1010101010180000,
0x665A42420000,
0x2E3222220000,
0x3C42423C0000,
0x5C62427C4040,
0x3A46423E0202,
0x2C3220200000,
0x1C201804380000,
0x103C1010180000,
0x2222261A0000,
0x424224180000,
0x81815A660000,
0x422418660000,
0x422214081060,
0x3C08103C0000,
0x1C103030101C0000,
0x808080808080800,
0x38080C0C08380000,
0x324C000000,
], dtype=np.uint64)
bitmap = np.unpackbits(characters.view(np.uint8)).reshape(characters.shape[0],
8, 8)
bitmap = bitmap[:, ::-1, :]
chars = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
mapping = {c: i for i, c in enumerate(chars)}
def string_to_image(string):
    """Render ASCII text as a binary image by concatenating the module's
    8x8 font glyphs horizontally (transposed and flipped for display)."""
    glyphs = [bitmap[mapping[ch]] for ch in string]
    return np.hstack(np.array(glyphs)).T[:, ::-1]
def string_to_index(string):
    """Map each character of the string to its index in the font table."""
    indices = [mapping[ch] for ch in string]
    return np.asarray(indices)
def recurrence_relation(y, y_mask):
    '''
    Build the CTC forward-recurrence transition structures for a batch of
    label sequences.

    y : symbolic label matrix, presumably (label length L, batch B) -- TODO confirm
    y_mask : matching mask for valid label positions

    Returns (r2, r3): r2 is the LxL advance-by-one matrix; r3 is LxLxB and
    allows skipping over a blank only where the labels two apart differ.
    '''
    # with blank symbol of -1 this falls back to the recurrence that fails
    # with repeating symbols!
    blank_symbol = -1
    n_y = y.shape[0]
    # pad two trailing blank rows so the two-apart comparison below stays in range
    blanks = tensor.zeros((2, y.shape[1])) + blank_symbol
    ybb = tensor.concatenate((y, blanks), axis=0).T
    # skip transition allowed where label[i] != label[i+2] and label[i+1] is blank
    sec_diag = (tensor.neq(ybb[:, :-2], ybb[:, 2:]) *
                tensor.eq(ybb[:, 1:-1], blank_symbol) *
                y_mask.T)
    # r1: LxL
    # r2: LxL
    # r3: LxLxB
    r2 = tensor.eye(n_y, k=1)
    r3 = (tensor.eye(n_y, k=2).dimshuffle(0, 1, 'x') *
          sec_diag.dimshuffle(1, 'x', 0))
    return r2, r3
def _epslog(x):
    """Clipped log: clamp x into [1e-12, 1e12] before taking the log, so the
    result is finite, then cast to theano's configured float dtype."""
    clipped = tensor.clip(x, 1E-12, 1E12)
    return tensor.cast(tensor.log(clipped), theano.config.floatX)
def _log_add(a, b):
    """Numerically stable elementwise log(exp(a) + exp(b)).
    Note a + b - 2*max equals min - max, so the exp argument is <= 0."""
    larger = tensor.maximum(a, b)
    return larger + tensor.log1p(tensor.exp(a + b - 2 * larger))
def _log_dot_matrix(x, z):
    """Matrix product of log-space x with 0/1 matrix z; columns of z that are
    all zero are pushed toward -inf (via a large negative offset) so they
    cannot contribute probability mass."""
    big = 1E12
    product = tensor.dot(x, z)
    dead_columns = (z.max(axis=0) - 1) * big
    return product + dead_columns
def _log_dot_tensor(x, z):
    """Batched variant of _log_dot_matrix for a 3-D transition tensor z:
    contract x against z per batch element, masking all-zero slices with a
    large negative offset (approximating -inf in log space)."""
    big = 1E12
    contracted = (x.dimshuffle(1, 'x', 0) * z).sum(axis=0).T
    dead = (z.max(axis=0) - 1) * big
    return contracted + dead.T
def class_batch_to_labeling_batch(y, y_hat, y_hat_mask):
    '''
    Gather, for every time step and batch element, the predicted probability of
    each target label.

    y : label matrix, presumably (label length, batch) -- TODO confirm
    y_hat : network output, presumably (time, batch, classes) -- TODO confirm
    y_hat_mask : (time, batch) mask of valid time steps
    '''
    # move classes to the middle axis: (time, classes, batch)
    y_hat = y_hat.dimshuffle(0, 2, 1)
    # zero out time steps beyond each sequence's length
    y_hat = y_hat * y_hat_mask.dimshuffle(0, 'x', 1)
    batch_size = y_hat.shape[2]
    # advanced indexing: pick probability of each label for its own batch column
    res = y_hat[:, y.astype('int32'), tensor.arange(batch_size)]
    return res
def log_path_probs(y, y_mask, y_hat, y_hat_mask):
    '''
    CTC forward pass in log space: accumulate, over time via theano.scan, the
    log probability of every label-prefix alignment.

    Returns the symbolic tensor of forward log probabilities
    (presumably time x batch x label-length -- TODO confirm).
    '''
    pred_y = class_batch_to_labeling_batch(y, y_hat, y_hat_mask)
    r2, r3 = recurrence_relation(y, y_mask)

    def step(log_p_curr, log_p_prev):
        # three allowed transitions: stay (p1), advance one label (p2),
        # skip over a blank (p3, gated by r3)
        p1 = log_p_prev
        p2 = _log_dot_matrix(p1, r2)
        p3 = _log_dot_tensor(p1, r3)
        p123 = _log_add(p3, _log_add(p1, p2))
        return (log_p_curr.T +
                p123 +
                _epslog(y_mask.T))

    # initial state: all probability mass on the first label position
    log_probabilities, _ = theano.scan(
        step,
        sequences=[_epslog(pred_y)],
        outputs_info=[_epslog(tensor.eye(y.shape[0])[0] *
                              tensor.ones(y.T.shape))])
    return log_probabilities
def log_ctc_cost(y, y_mask, y_hat, y_hat_mask):
    '''
    Mean negative log likelihood of the target labelings under CTC.

    y, y_mask : labels and their mask; y_hat, y_hat_mask : network outputs and
    their time mask. Returns a scalar cost averaged over the batch.
    '''
    # per-sequence lengths, used to index the final time step / label position
    y_hat_mask_len = tensor.sum(y_hat_mask, axis=0, dtype='int32')
    y_mask_len = tensor.sum(y_mask, axis=0, dtype='int32')
    log_probs = log_path_probs(y, y_mask, y_hat, y_hat_mask)
    batch_size = log_probs.shape[1]
    # a valid CTC path may end on the last label or the one before it
    # (the trailing blank), so sum those two terminal probabilities
    labels_prob = _log_add(
        log_probs[y_hat_mask_len - 1, tensor.arange(batch_size),
                  y_mask_len - 1],
        log_probs[y_hat_mask_len - 1, tensor.arange(batch_size),
                  y_mask_len - 2])
    avg_cost = tensor.mean(-labels_prob)
    return avg_cost
def as_shared(arr, name=None):
    """Wrap a value in a theano shared variable.

    Scalars (float/int) are cast to theano's configured float dtype; arrays are
    wrapped with borrow=True to avoid a copy. The optional name is attached to
    the shared variable when given.

    Bug fix: the original inverted both `if name is not None` branches, so a
    supplied name was always dropped (and name=None was passed explicitly
    when no name was given).
    """
    if type(arr) in [float, int]:
        if name is None:
            return theano.shared(np.cast[theano.config.floatX](arr))
        return theano.shared(np.cast[theano.config.floatX](arr), name=name)
    if name is None:
        return theano.shared(value=arr, borrow=True)
    return theano.shared(value=arr, name=name, borrow=True)
def np_zeros(shape):
    """Numpy array of zeros in theano's configured float dtype."""
    return np.zeros(shape, dtype=theano.config.floatX)
def np_ones(shape):
    """ Builds a numpy variable filled with ones, in theano's float dtype """
    return np.ones(shape).astype(theano.config.floatX)
def np_rand(shape, random_state):
    """Uniform random values in [-0.08, 0.08), cast to theano's float dtype."""
    # Make sure bounds aren't the same
    draws = random_state.uniform(low=-0.08, high=0.08, size=shape)
    return draws.astype(theano.config.floatX)
def np_randn(shape, random_state):
    """Gaussian random values scaled by 0.01, cast to theano's float dtype."""
    samples = 0.01 * random_state.randn(*shape)
    return samples.astype(theano.config.floatX)
def np_tanh_fan(shape, random_state):
# The . after the 6 is critical! shape has dtype int...
bound = np.sqrt(6. / | np.sum(shape) | numpy.sum |
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, color
from skimage.morphology import disk
import skimage.filters.rank as sfr
from PIL import Image, ImageEnhance
from skimage import exposure
import collections
from phasepack import phasecong, phasecongmono, phasesym, phasesymmono
import pyfftw
import math
from math import pi
def Principle_Vessel_Segmentation_for_MA_and_HE(path):  # principal vessel segmentation
    '''
    Segment the principal (large) retinal blood vessels from a fundus image,
    tuned for downstream micro-aneurysm (MA) and hemorrhage (HE) analysis.

    path : str : path to the fundus image file
    Returns a binary uint8 mask (vessels = 255) after removing components
    smaller than the area thresholds below.
    '''
    img = cv2.imread(path)
    # vessels have highest contrast in the green channel
    _, green, _ = cv2.split(img)
    # alternating open/close with growing elliptical kernels (5 -> 11 -> 23)
    # estimates the vessel-free background of the green channel
    r1 = cv2.morphologyEx(green, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    morph_contrast_enhanced_green = R3
    # background minus original leaves the dark vessel tree; CLAHE boosts its contrast
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    f5 = clahe.apply(cv2.subtract(morph_contrast_enhanced_green, green))
    ret, f6 = cv2.threshold(f5, 30, 255, cv2.THRESH_BINARY)  # used to compute the mask
    # mask is all-255 here, so the bitwise_and is effectively a copy of f6
    mask = np.ones(f6.shape[:2], dtype="uint8") * 255
    im = cv2.bitwise_and(f6, f6, mask=mask)
    ret, fin = cv2.threshold(im, 11, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    # erode-then-dilate (opening) removes small specks while keeping vessel width
    kernel = np.ones((3, 3), np.uint8)
    erosion = cv2.erode(newfin, kernel)
    dst = cv2.dilate(erosion, kernel)
    # invert so vessels are foreground (255)
    dst = 255 - dst
    # drop connected components smaller than 2000 px
    contours, hierarchy = cv2.findContours(dst, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 2000:
            cv2.drawContours(dst, [cnt], -1, 0, -1)
    # two more opening passes (2x2 then 5x5) to smooth the remaining vessel mask
    dst = cv2.erode(dst, np.ones((2, 2), np.uint8))
    dst = cv2.dilate(dst, np.ones((2, 2), np.uint8))
    dst = cv2.erode(dst, np.ones((5, 5), np.uint8))
    dst = cv2.dilate(dst, np.ones((5, 5), np.uint8))
    # final small-component cleanup at a lower area threshold
    contours, hierarchy = cv2.findContours(dst, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 1500:
            cv2.drawContours(dst, [cnt], -1, 0, -1)
    return dst
def Principle_Vessel_Segmentation_for_EX_and_SE(path):
    """Segment the principal retinal vessels for EX/SE lesion analysis.

    Same alternating open/close enhancement as the MA/HE variant, but with a
    gentler CLAHE (clip limit 2.0), a lower threshold (15), and a
    contour-derived mask that blanks out small connected components before
    the final cleanup. Returns a binary (0/255) uint8 vessel mask.
    """
    image = cv2.imread(path)
    _, green, _ = cv2.split(image)
    # Alternating sequential filter with growing elliptical kernels builds a
    # vessel-free background estimate.
    background = green
    for size in (5, 11, 23):
        se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
        background = cv2.morphologyEx(background, cv2.MORPH_OPEN, se, iterations=1)
        background = cv2.morphologyEx(background, cv2.MORPH_CLOSE, se, iterations=1)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = clahe.apply(cv2.subtract(background, green))
    _, binary = cv2.threshold(enhanced, 15, 255, cv2.THRESH_BINARY)  # used to compute the mask
    # Build a keep-mask that zeroes out small connected components (<= 2000 px).
    keep_mask = np.full(enhanced.shape[:2], 255, dtype="uint8")
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) <= 2000:
            cv2.drawContours(keep_mask, [contour], -1, 0, -1)
    masked = cv2.bitwise_and(enhanced, enhanced, mask=keep_mask)
    _, inverted = cv2.threshold(masked, 15, 255, cv2.THRESH_BINARY_INV)
    eroded = cv2.erode(inverted, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    # Final opening (erode then dilate) with a 5x5 box kernel.
    box = np.ones((5, 5), np.uint8)
    return cv2.dilate(cv2.erode(eroded, box), box)
def Ocular_boundary_Pextraction(path): #眼球边界检测
img = cv2.imread(path)
_, green, _ = cv2.split(img)
erosion = cv2.dilate(green, kernel=np.ones((3, 3), np.uint8))
dst = cv2.erode(erosion, kernel=np.ones((3, 3), np.uint8))
contours, hierarchy = cv2.findContours(dst, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) != 0:
area = []
for k in range(len(contours)):
area.append(cv2.contourArea(contours[k]))
max_idx = np.argmax(np.array(area))
dst = cv2.drawContours(dst, contours, max_idx, 255, cv2.FILLED)
h = dst.shape[0]
w = dst.shape[1]
c = max(h, w)
cv2.circle(dst, (round(w / 2), round(h / 2)), round(c / 2 - 8), (255, 255, 255), -1)
dst = cv2.erode(dst, kernel= | np.ones((12, 12), np.uint8) | numpy.ones |
import tensorflow as tf
from spinup.algos.ude_td3.core import get_vars
from spinup.utils.logx import Logger
import numpy as np
class UncertaintyModule(object):
    """Tracks uncertainty-change trajectories for a fixed observation set.

    The module keeps references to the policy graph and its RND (random
    network distillation) heads, samples a set of observations from the
    replay buffer, and periodically logs per-observation action covariance
    and RND prediction errors through two spinup ``Logger``s
    (``<type>_uncertainty.txt`` and ``<type>_sample_observation.txt``).
    """

    def __init__(self, act_dim, obs_dim, n_post_action,
                 obs_set_size, track_obs_set_unc_frequency,
                 pi, x_ph, a_ph,
                 pi_dropout_mask_phs, pi_dropout_mask_generator,
                 rnd_targ_act, rnd_pred_act,
                 rnd_targ_cri, rnd_pred_cri,
                 logger_kwargs,
                 tf_var_scope_main='main', tf_var_scope_target='target', tf_var_scope_unc='uncertainty',
                 uncertainty_type='dropout'):
        self.act_dim = act_dim
        self.obs_dim = obs_dim
        self.n_post_action = n_post_action
        # policy graph handles
        self.pi = pi
        self.x_ph = x_ph
        self.a_ph = a_ph
        # dropout placeholders / mask generator
        self.pi_dropout_mask_phs = pi_dropout_mask_phs
        self.pi_dropout_mask_generator = pi_dropout_mask_generator
        # RND target/predictor heads for actor and critic
        self.rnd_targ_act = rnd_targ_act
        self.rnd_pred_act = rnd_pred_act
        self.rnd_targ_cri = rnd_targ_cri
        self.rnd_pred_cri = rnd_pred_cri
        self.obs_set_size = obs_set_size
        self.obs_set_is_empty = True
        self.track_obs_set_unc_frequency = track_obs_set_unc_frequency
        self.tf_var_scope_main = tf_var_scope_main
        self.tf_var_scope_target = tf_var_scope_target
        self.tf_var_scope_unc = tf_var_scope_unc
        self.uncertainty_logger = Logger(output_fname='{}_uncertainty.txt'.format(uncertainty_type),
                                         **logger_kwargs)
        self.sample_logger = Logger(output_fname='{}_sample_observation.txt'.format(uncertainty_type),
                                    **logger_kwargs)

    def uncertainty_policy_update_targ(self, sess):
        """Copy the *target*-scope weights into the uncertainty policy.

        (The original docstring said "current policy", but the source
        variables come from ``tf_var_scope_target``.)
        """
        sess.run(tf.group([tf.assign(v_unc, v_main)
                           for v_main, v_unc in
                           zip(get_vars(self.tf_var_scope_target), get_vars(self.tf_var_scope_unc))]))

    def uncertainty_policy_update(self, sess):
        """Copy the main-scope (current) policy weights into the uncertainty policy."""
        sess.run(tf.group([tf.assign(v_unc, v_main)
                           for v_main, v_unc in zip(get_vars(self.tf_var_scope_main), get_vars(self.tf_var_scope_unc))]))

    def sample_obs_set_from_replay_buffer(self, replay_buffer):
        """Sample an observation set from the replay buffer and log each observation."""
        self.obs_set = replay_buffer.sample_batch(self.obs_set_size)['obs1']
        self.obs_set_is_empty = False
        # Save sampled observations, one row per observation.
        for i, o in enumerate(self.obs_set):
            self.sample_logger.log_tabular('Observation', i)
            for dim, o_i in enumerate(o):
                self.sample_logger.log_tabular('o_{}'.format(dim), o_i)
            self.sample_logger.dump_tabular(print_data=False)

    def calculate_obs_set_uncertainty(self, sess, epoch, step):
        """Log action covariance and actor-RND error for every tracked observation."""
        self.uncertainty_logger.log_tabular('Epoch', epoch)
        self.uncertainty_logger.log_tabular('Step', step)
        for obs_i, obs in enumerate(self.obs_set):
            # Uncertainty = covariance of the posterior action samples.
            a_post = self.get_post_samples(obs, sess, step)
            a_cov = np.cov(a_post, rowvar=False)
            for unc_i, unc_v in enumerate(np.array(a_cov).flatten(order='C')):
                self.uncertainty_logger.log_tabular('Obs{}_unc_{}'.format(obs_i, unc_i), unc_v)
            # RND prediction error for the actor head.
            rnd_targ, rnd_pred, rnd_pred_error = self.calculate_actor_RND_pred_error(obs, sess)
            for rnd_i in range(self.act_dim):
                self.uncertainty_logger.log_tabular('Obs{}_rnd_t_{}'.format(obs_i, rnd_i), rnd_targ[rnd_i])
                self.uncertainty_logger.log_tabular('Obs{}_rnd_p_{}'.format(obs_i, rnd_i), rnd_pred[rnd_i])
            self.uncertainty_logger.log_tabular('Obs{}_rnd_error'.format(obs_i), rnd_pred_error)
        self.uncertainty_logger.dump_tabular(print_data=False)

    def calculate_actor_RND_pred_error(self, obs, sess):
        """Return (target, prediction, L2 prediction error) of the actor RND head."""
        feed_dictionary = {self.x_ph: obs.reshape(1, -1)}
        targ, pred = sess.run([self.rnd_targ_act, self.rnd_pred_act], feed_dict=feed_dictionary)
        pred_error = np.sqrt(np.sum((pred - targ) ** 2))
        return targ[0], pred[0], pred_error

    def calculate_critic_RND_pred_error(self, obs, act, sess):
        """Return (target, prediction, L2 prediction error) of the critic RND head."""
        feed_dictionary = {self.x_ph: obs.reshape(1, -1), self.a_ph: act.reshape(1, -1)}
        targ, pred = sess.run([self.rnd_targ_cri, self.rnd_pred_cri], feed_dict=feed_dictionary)
        # BUG FIX: was sqrt(sum(pred - targ) ** 2) == |sum(pred - targ)|,
        # which is not the Euclidean distance used by the actor variant.
        pred_error = np.sqrt(np.sum((pred - targ) ** 2))
        return targ[0], pred[0], pred_error

    def get_post_samples(self, obs, sess, step_index=None):
        """Return an (n_post_action, act_dim) matrix of posterior action samples.

        Base-class stub returning zeros; subclasses override it with a real
        sampler. ``step_index`` is accepted (with a default) because
        ``calculate_obs_set_uncertainty`` passes the current step — the
        original two-argument signature raised a TypeError when called there.
        """
        # BUG FIX: np.zeros(self.n_post_action, self.act_dim) passed act_dim
        # as the dtype argument and raised a TypeError.
        return np.zeros((self.n_post_action, self.act_dim))
class DropoutUncertaintyModule(UncertaintyModule):
"""This class is to provide functions to investigate dropout-based uncertainty change trajectories."""
    def __init__(self, act_dim, obs_dim, n_post_action,
                 obs_set_size, track_obs_set_unc_frequency,
                 x_ph, a_ph, pi, q1, q2,
                 pi_dropout_mask_phs, pi_dropout_mask_generator,
                 q1_dropout_mask_phs, q1_dropout_mask_generator,
                 q2_dropout_mask_phs, q2_dropout_mask_generator,
                 rnd_targ_act, rnd_pred_act,
                 rnd_targ_cri, rnd_pred_cri,
                 logger_kwargs,
                 tf_var_scope_main='main', tf_var_scope_target='target', tf_var_scope_unc='uncertainty'):
        """Extend the base module with critic heads (q1, q2) and per-network
        dropout-mask sets used for posterior sampling.

        NOTE(review): argument order here is ``x_ph, a_ph, pi`` while the
        base class takes ``pi, x_ph, a_ph`` — the super() call below reorders
        them accordingly.
        """
        super().__init__(act_dim, obs_dim, n_post_action,
                         obs_set_size, track_obs_set_unc_frequency,
                         pi, x_ph, a_ph,
                         pi_dropout_mask_phs, pi_dropout_mask_generator,
                         rnd_targ_act, rnd_pred_act,
                         rnd_targ_cri, rnd_pred_cri,
                         logger_kwargs, tf_var_scope_main, tf_var_scope_target, tf_var_scope_unc, 'dropout')
        self.q1 = q1
        self.q2 = q2
        self.q1_dropout_mask_phs = q1_dropout_mask_phs
        self.q1_dropout_mask_generator = q1_dropout_mask_generator
        self.q2_dropout_mask_phs = q2_dropout_mask_phs
        self.q2_dropout_mask_generator = q2_dropout_mask_generator
        # One fixed dropout mask per posterior action sample, for the policy
        # and for each critic.
        self.dropout_masks_set_pi = {i:pi_dropout_mask_generator.generate_dropout_mask() for i in range(n_post_action)}
        self.dropout_masks_set_q1 = {i:q1_dropout_mask_generator.generate_dropout_mask() for i in range(n_post_action)}
        self.dropout_masks_set_q2 = {i:q2_dropout_mask_generator.generate_dropout_mask() for i in range(n_post_action)}
        # When True, masks are only resampled every
        # ``delayed_dropout_masks_update_freq`` steps — presumably consumed by
        # code outside this view; TODO confirm.
        self.delayed_dropout_masks_update = False
        self.delayed_dropout_masks_update_freq = 1000
    def uncertainty_pi_dropout_masks_update(self):
        """Resample the policy dropout masks (one mask per posterior action sample)."""
        self.dropout_masks_set_pi = {i: self.pi_dropout_mask_generator.generate_dropout_mask() for i in
                                     range(self.n_post_action)}
    def uncertainty_q_dropout_masks_update(self):
        """Resample the dropout masks of both critics (one per posterior sample)."""
        self.dropout_masks_set_q1 = {i: self.q1_dropout_mask_generator.generate_dropout_mask() for i in
                                     range(self.n_post_action)}
        self.dropout_masks_set_q2 = {i: self.q2_dropout_mask_generator.generate_dropout_mask() for i in
                                     range(self.n_post_action)}
def get_post_samples(self, obs, sess, step_index):
"""Return a post sample matrix for an observation."""
feed_dictionary = {self.x_ph: obs.reshape(1, -1)}
a_post = | np.zeros((self.n_post_action, self.act_dim)) | numpy.zeros |
import sys
import numpy as np
from src.qrl import QRL
from src.tools.tools import timeit
from src.environments.Game2Enemies import Game2Enemies
from src.tools.helpers import stepToString
from defines import *
# Hyper-parameters: defaults from ``defines`` unless all four values
# (episodes, learning rate, discount rate, decay rate) are passed on the CLI.
if len(sys.argv) < 5:
    # initialize standard parameters
    total_episodes = NUM_EPISODES
    learning_rate = LEARNING_RATE
    discount_rate = DISCOUNT_RATE
    decay_rate = DECAY_RATE
else:
    # initialize with given parameter-values
    total_episodes = int(sys.argv[1])
    learning_rate = float(sys.argv[2])
    discount_rate = float(sys.argv[3])
    decay_rate = float(sys.argv[4])
# Train the Q-learning agent and time the run; timeit wraps qrl.run —
# presumably returning elapsed seconds, TODO confirm against tools.timeit.
qrl = QRL.QRL(env=Game2Enemies(map_name=MAP_NAME), learning_rate=learning_rate, discount_rate=discount_rate, decay_rate=decay_rate)
exec_time = timeit(qrl.run, [total_episodes, "qtables/190427_20"], 1)
# store exec_time in file
with open("performance.csv", 'a') as file:
    s = "%i,%.2f,%.2f,%.5f,%f;\n" % (total_episodes, learning_rate, discount_rate, decay_rate, exec_time)
    file.write(s)
if SHOW_QTABLE:
    print("=== Q-Table ===============================")
    qrl.loadFromFile()
    qrl.qtable.show()
if SHOW_MAP:
    print("====== MAP Layout ==================\n")
    qrl.environment.reset()
    qrl.environment.render()
    print("\n")
# Evaluate the trained policy for NUM_TESTS episodes; step[3] is summed as
# the per-step reward — presumably the reward slot of the step tuple, TODO
# confirm against QRL.test's step format.
numSteps = []
total_rewards = np.zeros(NUM_TESTS)
for i in range(NUM_TESTS):
    steps = qrl.test(render=False)
    numSteps.append(len(steps))
    for step in steps:
        total_rewards[i] += step[3]
    # Optionally only print games whose total reward fell below THRESHOLD.
    if SHOW_ONLY_SUBPAR and total_rewards[i] >= THRESHOLD:
        continue
    if SHOW_TESTS:
        print("===== Test Game %i =====================" % (i+1))
        for j in range(len(steps)):
            output = stepToString(steps[j])
            print(j if j >= 10 else "%i " % j, output)
        print("Total Reward: %i\n" % total_rewards[i])
print("\n Average Number of Steps taken %.2f" % np.mean(numSteps))
print("\nMedian Reward: %.2f; Mean Reward: %.2f" % ( | np.median(total_rewards) | numpy.median |
"""Unified interface to all dynamic graph model experiments"""
import math
import logging
import time
import sys
import os
import random
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_absolute_error
# from utils import EarlyStopMonitor
from module import TGAN
from graph import NeighborFinder
from kde import kde_fair
class LR(torch.nn.Module):
    """Three-layer MLP head (dim -> 80 -> 10 -> 1) with ReLU activations and
    dropout after each hidden layer; ``forward`` returns logits squeezed to
    shape ``(batch,)``."""

    def __init__(self, dim, drop=0.3):
        super().__init__()
        self.fc_1 = torch.nn.Linear(dim, 80)
        self.fc_2 = torch.nn.Linear(80, 10)
        self.fc_3 = torch.nn.Linear(10, 1)
        self.act = torch.nn.ReLU()
        self.dropout = torch.nn.Dropout(p=drop, inplace=True)

    def forward(self, x):
        hidden = self.dropout(self.act(self.fc_1(x)))
        hidden = self.dropout(self.act(self.fc_2(hidden)))
        return self.fc_3(hidden).squeeze(dim=1)
class Adversary(nn.Module):
    """MLP that tries to recover the sensitive attribute(s) from its input
    (here: the classifier's output probability); emits per-attribute
    sigmoid probabilities."""

    def __init__(self, n_input, n_sensitive, n_hidden=32):
        super(Adversary, self).__init__()
        # Same layout as Sequential(Linear, ReLU, Linear, ReLU, Linear, ReLU,
        # Linear), so the state_dict keys (network.0 ... network.6) match.
        layers = []
        widths = [n_input, n_hidden, n_hidden, n_hidden]
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(w_in, w_out))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(n_hidden, n_sensitive))
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x).sigmoid()
def pretrain_adversary(idx_list, train_src_l, train_ts_l, n_label, Sn_feat,
                       clf, adv, tgan, adv_optimizer, adv_criterion, epochs):
    """Pre-train the adversary to predict the sensitive attribute from the
    frozen classifier's output probability.

    The TGAT encoder (``tgan``) and classifier (``clf``) stay in eval mode
    and receive no optimizer steps; only ``adv`` is trained. The adversarial
    loss is scaled by the global ``args.hyper_pent``.

    Returns:
        The trained adversary module.
    """
    num_train_instance = len(train_src_l)
    num_train_batch = math.ceil(num_train_instance / BATCH_SIZE)
    for epoch in tqdm(range(epochs)):
        np.random.shuffle(idx_list)
        tgan = tgan.eval()
        clf = clf.eval()
        adv = adv.train()
        for k in range(num_train_batch):
            s_idx = k * BATCH_SIZE
            # BUG FIX: the original read the *module-level* ``num_instance``
            # minus one here, which (a) depended on an unrelated global and
            # (b) always dropped the final training instance.
            e_idx = min(num_train_instance, s_idx + BATCH_SIZE)
            src_l_cut = train_src_l[s_idx:e_idx]
            ts_l_cut = train_ts_l[s_idx:e_idx]
            Sn_feat_cut = Sn_feat[src_l_cut]
            adv_optimizer.zero_grad()
            with torch.no_grad():
                # Encoder is frozen during adversary pre-training, so the
                # embeddings carry no gradient. (The unused node-label lookup
                # from the original was removed.)
                src_embed = tgan.tem_conv(src_l_cut, ts_l_cut, NODE_LAYER)
                src_Sn_feat = torch.from_numpy(Sn_feat_cut).float().to(device)
            lr_prob = clf(src_embed).sigmoid()
            lr_prob = torch.unsqueeze(lr_prob, 1)
            p_z = adv(lr_prob).flatten()
            adv_loss = adv_criterion(p_z, src_Sn_feat) * args.hyper_pent
            adv_loss.backward()
            adv_optimizer.step()
    return adv
def pretrain_classifier(idx_list, train_src_l, train_ts_l, n_label, Sn_feat,
                        clf, tgan, clf_optimizer, clf_criterion, epochs):
    """Pre-train the node classifier on top of the frozen TGAT encoder.

    Embeddings come from ``tgan`` under ``torch.no_grad()`` (the encoder is
    not updated); only ``clf`` receives gradient steps.

    Returns:
        The trained classifier module.
    """
    num_train_instance = len(train_src_l)
    num_train_batch = math.ceil(num_train_instance / BATCH_SIZE)
    for epoch in tqdm(range(epochs)):
        np.random.shuffle(idx_list)
        tgan = tgan.eval()
        clf = clf.train()
        for k in range(num_train_batch):
            s_idx = k * BATCH_SIZE
            # BUG FIX: same off-by-one / stray-global issue as in
            # pretrain_adversary — use the local instance count and keep the
            # last instance.
            e_idx = min(num_train_instance, s_idx + BATCH_SIZE)
            src_l_cut = train_src_l[s_idx:e_idx]
            ts_l_cut = train_ts_l[s_idx:e_idx]
            label_l_cut = n_label[src_l_cut]
            clf_optimizer.zero_grad()
            with torch.no_grad():
                # Encoder is frozen; only the classifier head is optimized.
                # (The unused sensitive-feature tensor from the original was
                # removed.)
                src_embed = tgan.tem_conv(src_l_cut, ts_l_cut, NODE_LAYER)
                src_label = torch.from_numpy(label_l_cut).float().to(device)
            lr_prob = clf(src_embed).sigmoid()
            lr_loss = clf_criterion(lr_prob, src_label)
            lr_loss.backward()
            clf_optimizer.step()
    return clf
# Fix all RNG seeds for reproducibility.
random.seed(222)
np.random.seed(222)
torch.manual_seed(222)
### Argument and global variables
parser = argparse.ArgumentParser('Interface for TGAT experiments on node classification')
parser.add_argument('-d', '--data', type=str, help='data sources to use, try wikipedia or reddit', default='wikipedia')
parser.add_argument('--bs', type=int, default=30, help='batch_size')
parser.add_argument('--prefix', type=str, default='')
parser.add_argument('--n_degree', type=int, default=50, help='number of neighbors to sample')
parser.add_argument('--n_neg', type=int, default=1)
parser.add_argument('--n_head', type=int, default=2)
parser.add_argument('--n_epoch', type=int, default=15, help='number of epochs')
parser.add_argument('--n_layer', type=int, default=2)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--tune', action='store_true', help='parameters tunning mode, use train-test split on training data only.')
parser.add_argument('--drop_out', type=float, default=0.1, help='dropout probability')
parser.add_argument('--gpu', type=int, default=0, help='idx for the gpu to use')
parser.add_argument('--node_dim', type=int, default=None, help='Dimentions of the node embedding')
parser.add_argument('--time_dim', type=int, default=None, help='Dimentions of the time embedding')
parser.add_argument('--agg_method', type=str, choices=['attn', 'lstm', 'mean'], help='local aggregation method', default='attn')
parser.add_argument('--attn_mode', type=str, choices=['prod', 'map'], default='prod')
parser.add_argument('--time', type=str, choices=['time', 'pos', 'empty'], help='how to use time information', default='time')
parser.add_argument('--new_node', action='store_true', help='model new node')
parser.add_argument('--uniform', action='store_true', help='take uniform sampling from temporal neighbors')
parser.add_argument('--running_times', type=int, default=5, help='number of running times')
parser.add_argument('--day_times', type=int, default=1, help='number of recording day in the dataset')
parser.add_argument('--features_type', type=str, default='s', help='type of sensitive attributes (h or s)')
parser.add_argument('--clf', type=str, choices=['clf', 'reg'], default='clf', help='nodel classificaltion/regression')
parser.add_argument('--sens_bn', type=str, choices=['yes', 'no'], default='no', help='sensitive attributes binary')
parser.add_argument('--hyper_pent', type=float, default=1.0, help='Hyperparmeters for penalty')
# On a parse error argparse raises SystemExit; the bare except catches it so
# the full help text is printed before exiting.
try:
    args = parser.parse_args()
except:
    parser.print_help()
    sys.exit(0)
# Shorthand globals derived from the parsed CLI arguments.
BATCH_SIZE = args.bs
NUM_NEIGHBORS = args.n_degree
NUM_NEG = 1
NUM_EPOCH = args.n_epoch
NUM_HEADS = args.n_head
DROP_OUT = args.drop_out
GPU = args.gpu
UNIFORM = args.uniform
NEW_NODE = args.new_node
USE_TIME = args.time
AGG_METHOD = args.agg_method
ATTN_MODE = args.attn_mode
SEQ_LEN = NUM_NEIGHBORS
DATA = args.data
NUM_LAYER = args.n_layer
LEARNING_RATE = args.lr
NODE_LAYER = 1
NODE_DIM = args.node_dim
TIME_DIM = args.time_dim
RUNNING_TIME = args.running_times
time_duration = args.day_times
features_type = args.features_type
hyper_pent = args.hyper_pent
# code_root_path = './code/'
code_root_path = './'
data_root_path = '/data/zhimengj/dataset/Harris/'
### Load data and train val test split
if DATA=='harris':
    # Edge list (CSV), edge features, node features and node labels; the
    # 'clf' mode uses the binary labels, 'reg' the secondary label file.
    g_df = pd.read_csv(data_root_path + '{}_edge_{}.csv'.format(DATA, time_duration))
    e_feat = np.load(data_root_path + '{}_edge_{}.npy'.format(DATA,time_duration))
    # print(f'e_feat={e_feat.shape}')
    n_feat = np.load(data_root_path + '{}_node.npy'.format(DATA))
    if args.clf=='clf':
        n_label = np.load(data_root_path + '{}_Ynode.npy'.format(DATA))
    else:
        n_label = np.load(data_root_path + '{}_Ynode2.npy'.format(DATA))
# Chronological split points: harris uses fixed fractions of the recording
# span (time_duration * 6 units); other datasets use ts quantiles.
if DATA=='harris':
    val_time, test_time = 0.60 * time_duration * 6, 0.8 * time_duration * 6
else:
    val_time, test_time = list(np.quantile(g_df.ts, [0.60, 0.80]))
# print(f'val_time={val_time}')
# print(f'test_time={test_time}')
if DATA=='harris':
    # Continuous per-node sensitive attribute, plus a binarized version
    # (above/below the mean).
    # NOTE(review): the file handle from open() is never closed.
    Sn_feat = pickle.load( open(data_root_path + '{}_Snode.p'.format(DATA), "rb") )
    SHn_feat = (Sn_feat > np.mean(Sn_feat)).astype(int)
# val_time, test_time = list(np.quantile(g_df.ts, [0.70, 0.85]))
src_l = g_df.u.values
dst_l = g_df.i.values
e_idx_l = g_df.idx.values
# label_l = g_df.label.values
ts_l = g_df.ts.values
max_src_index = src_l.max()
max_idx = max(src_l.max(), dst_l.max())
total_node_set = set(np.unique(np.hstack([g_df.u.values, g_df.i.values])))
def eval_epoch_clf(src_l, dst_l, ts_l, n_label, batch_size, lr_model, tgan, num_layer=NODE_LAYER):
val_acc, val_ap, val_f1, val_auc = [], [], [], []
pred_prob = np.zeros(len(src_l))
loss = 0
num_instance = len(src_l)
num_batch = math.ceil(num_instance / batch_size)
with torch.no_grad():
lr_model.eval()
tgan.eval()
for k in range(num_batch):
s_idx = k * batch_size
e_idx = min(num_instance - 1, s_idx + batch_size)
src_l_cut = src_l[s_idx:e_idx]
# dst_l_cut = dst_l[s_idx:e_idx]
ts_l_cut = ts_l[s_idx:e_idx]
# label_l_cut = label_l[s_idx:e_idx]
label_l_cut = n_label[src_l_cut]
size = len(src_l_cut)
src_embed = tgan.tem_conv(src_l_cut, ts_l_cut, num_layer)
src_label = torch.from_numpy(label_l_cut).float().to(device)
lr_prob = lr_model(src_embed).sigmoid()
loss += clf_criterion_eval(lr_prob, src_label).item()
pred_prob[s_idx:e_idx] = lr_prob.cpu().numpy()
pred_label = pred_prob > 0.5
auc_roc = roc_auc_score(n_label[src_l], pred_prob)
val_acc = (pred_label == n_label[src_l]).mean()
val_ap = average_precision_score(n_label[src_l], pred_prob)
# val_f1 = f1_score(label_l, pred_prob)
val_auc = roc_auc_score(n_label[src_l], pred_prob)
return np.around(val_acc, 4), | np.around(val_ap, 4) | numpy.around |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 11:13:31 2015
@author: kremin
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import scipy.signal as signal
import pdb
def plot_skylines(axi, red_est):
    '''Take an axis object and plot the location of skylines
    as well as emission and absorption lines at a redshift of red_est.'''
    # Rest-frame line lists are redshifted by (1 + red_est); sky lines stay
    # at their observed wavelengths. Converting to values used in the code:
    # SDSS wave / 1.00027 = these waves.
    # Red = HK; Purple = OII, Halpha; Black = sky; Blue = emission;
    # Orange = absorption.
    shift = 1. + red_est
    emission_lines = np.array([2798.4, 4101.8, 4340.5, 4861.4, 4959., 5006.9,
                               6548.1, 6583.5, 6716.5, 6730.9]) * shift
    sky_lines = [5577., 5893., 6300., 7244., 7913.7, 8344.6, 8827.1]
    absorption_lines = np.array([4304.4, 5175.3, 5894., 8498.1, 8542.1, 8662.2]) * shift
    # Collect the line handles that callers may want to toggle; sky lines are
    # drawn but (as in the original) not returned.
    pspecs = [axi.axvline(wave * shift, ls='--', alpha=0.7, c='purple')
              for wave in (3726.1, 6562.8)]
    pspecs += [axi.axvline(wave * shift, ls='--', alpha=0.7, c='red')
               for wave in (3933.7, 3968.5)]
    pspecs += [axi.axvline(wave, ls='--', alpha=0.7, c='blue') for wave in emission_lines]
    for wave in sky_lines:
        axi.axvline(wave, ls='-.', alpha=0.7, c='black')
    pspecs += [axi.axvline(wave, ls='--', alpha=0.7, c='orange') for wave in absorption_lines]
    return pspecs
def summary_plot(waves, flux, templ_waves, template,zest,z_test,corrs,plt_name,frame_name,mock_photoz=None):
'''Display the spectrum and reference lines for the best fitting redshift.'''
cont_subd_flux = flux - signal.medfilt(flux,171)
cont_subd_temp_flux = template - signal.medfilt(template,171)
cont_subd_flux = cont_subd_flux/np.std(cont_subd_flux)
cont_subd_temp_flux = cont_subd_temp_flux/np.std(cont_subd_temp_flux)
temp_shifted_waves = templ_waves*(1+zest)
plt.figure(figsize=(10, 10))
gs = gridspec.GridSpec(3, 1, height_ratios=[2, 1, 1])
plt.tight_layout()
ax = plt.subplot(gs[0])
plt.subplots_adjust(bottom=0.1)
#pdb.set_trace()
alp = 0.5
ax.plot(waves,flux,label='Target {}'.format(frame_name),alpha=alp+0.1)
tl,th = np.nanquantile(template,[0.2,0.8])
fl,fh = np.nanquantile(flux,[0.2,0.8])
modtemplate = 0.75*(fh-fl)*template/(th-tl)
if np.nanmax(modtemplate)>np.nanmax(flux):
modtemplate = modtemplate*0.5
ax.plot(temp_shifted_waves,modtemplate,alpha=alp,label='SDSS Template')
ax.set_xlim(waves[0],waves[-1])
last_ind = np.max(np.where(temp_shifted_waves<waves[-1]))
shortnd_temp_flux = cont_subd_temp_flux[:last_ind]
if len(shortnd_temp_flux)>0:
ax.set_ylim(np.min([np.nanmin(flux), | np.nanmin(modtemplate) | numpy.nanmin |
import numpy as np
import unittest
from optpy import optimization
class TestFunctionWithApproxJacobian(unittest.TestCase):
    """Checks value and finite-difference Jacobian of the wrapped function."""

    def test_function(self):
        def square_sum(x):
            return np.sum(x ** 2)

        wrapped = optimization.FunctionWithApproxJacobian(square_sum, epsilon=1e-8)
        point = np.array([1.0, 2.0, 3.0])
        # f(1, 2, 3) = 1 + 4 + 9; Jacobian of sum(x^2) is 2x.
        self.assertEqual(wrapped(point), 14.0)
        np.testing.assert_allclose(wrapped.jac(point), [[2.0, 4.0, 6.0]])
class TestParameterManager(unittest.TestCase):
    """Round-trip tests for optimization.ParameterManager (x1, x2 free; x3 fixed)."""

    @staticmethod
    def _manager(x1):
        return optimization.ParameterManager(
            ['x1', 'x2', 'x3'], ['x1', 'x2'],
            x1=x1, x2=np.array([2.0, 3.0]), x3=4.0)

    def test_build_vector(self):
        pm = self._manager(1.0)
        np.testing.assert_allclose(pm.build_vector(), [1.0, 2.0, 3.0])
        np.testing.assert_allclose(pm.build_vector(x1=4), [4.0, 2.0, 3.0])

    def test_extract_parameters(self):
        pm = self._manager(1.0)
        extracted = pm.extract_parameters(np.array([5.0, 6.0, 7.0]))
        np.testing.assert_allclose(extracted['x1'], 5.0)
        np.testing.assert_allclose(extracted['x2'], [6.0, 7.0])
        np.testing.assert_allclose(extracted['x3'], 4.0)

    def test_build_vector_0d_array(self):
        # Same as test_build_vector but with a 0-d array for x1.
        pm = self._manager(np.array(1.0))
        np.testing.assert_allclose(pm.build_vector(), [1.0, 2.0, 3.0])
        np.testing.assert_allclose(pm.build_vector(x1=4), [4.0, 2.0, 3.0])

    def test_extract_parameters_0d_array(self):
        pm = self._manager(np.array(1.0))
        extracted = pm.extract_parameters(np.array([5.0, 6.0, 7.0]))
        np.testing.assert_allclose(extracted['x1'], 5.0)
        np.testing.assert_allclose(extracted['x2'], [6.0, 7.0])
        np.testing.assert_allclose(extracted['x3'], 4.0)
class TestKeywordParameterManager(unittest.TestCase):
    """Round-trip tests for optimization.KeywordParameterManager (x1, x2 free; x3 fixed)."""

    @staticmethod
    def _manager(x1):
        return optimization.KeywordParameterManager(
            {'x1': x1, 'x2': np.array([2.0, 3.0]), 'x3': 4.0},
            ['x1', 'x2'])

    def test_build_vector(self):
        pm = self._manager(1.0)
        np.testing.assert_allclose(pm.build_vector(), [1.0, 2.0, 3.0])
        np.testing.assert_allclose(pm.build_vector(x1=4), [4.0, 2.0, 3.0])

    def test_extract_parameters(self):
        pm = self._manager(1.0)
        extracted = pm.extract_parameters(np.array([5.0, 6.0, 7.0]))
        np.testing.assert_allclose(extracted['x1'], 5.0)
        np.testing.assert_allclose(extracted['x2'], [6.0, 7.0])
        np.testing.assert_allclose(extracted['x3'], 4.0)

    def test_build_vector_0d_array(self):
        # Same as test_build_vector but with a 0-d array for x1.
        pm = self._manager(np.array(1.0))
        np.testing.assert_allclose(pm.build_vector(), [1.0, 2.0, 3.0])
        np.testing.assert_allclose(pm.build_vector(x1=4), [4.0, 2.0, 3.0])

    def test_extract_parameters_0d_array(self):
        pm = self._manager(np.array(1.0))
        extracted = pm.extract_parameters(np.array([5.0, 6.0, 7.0]))
        np.testing.assert_allclose(extracted['x1'], 5.0)
        np.testing.assert_allclose(extracted['x2'], [6.0, 7.0])
        np.testing.assert_allclose(extracted['x3'], 4.0)
class TestWrapParameterManager(unittest.TestCase):
    """wrap_parameter_manager should turn f(x1, x2, x3) into f(vector)."""

    @staticmethod
    def _objective(x1, x2, x3):
        return x1 + 2 * x2.sum() + 3 * x3

    def test_wrap_parameter_manager(self):
        pm = optimization.ParameterManager(
            ['x1', 'x2', 'x3'], ['x1', 'x2'],
            x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
        wrapped = optimization.wrap_parameter_manager(self._objective, pm)
        # vector [1, 2, 3] -> x1=1, x2=[2, 3], x3=1 (fixed): 1 + 2*5 + 3*1.
        self.assertEqual(wrapped(np.array([1, 2, 3])), 14)

    def test_wrap_keyword_parameter_manager(self):
        pm = optimization.KeywordParameterManager(
            {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0},
            ['x1', 'x2'])
        wrapped = optimization.wrap_parameter_manager(self._objective, pm)
        self.assertEqual(wrapped(np.array([1, 2, 3])), 14)
class TestMinimize(unittest.TestCase):
method='SLSQP'
    def test_simple(self):
        # Unconstrained minimum of x1^2 + |x2|^2 is zero for the free
        # parameters; the fixed parameter x3 must come back untouched.
        parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
                                                          x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
        def f(x1, x2, x3):
            return np.sum(x1**2)+np.sum(x2**2)
        res = optimization.minimize(f, parameter_manager, method=self.method)
        np.testing.assert_allclose(res.x1, 0.0)
        np.testing.assert_allclose(res.x2, [0.0, 0.0])
        np.testing.assert_allclose(res.x3, 1.0)
    def test_with_equality_constraint(self):
        # With x1 pinned to 1 by the equality constraint, the optimum keeps
        # x1 = 1 and drives the unconstrained x2 to zero.
        parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
                                                          x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
        def f(x1, x2, x3):
            return np.sum(x1**2)+np.sum(x2**2)
        def constraint(x1, x2, x3):
            return x1-1.0
        constraints = [{'type': 'eq', 'fun': constraint}]
        res = optimization.minimize(f, parameter_manager, method=self.method, constraints = constraints)
        np.testing.assert_allclose(res.x1, 1.0)
        np.testing.assert_allclose(res.x2, [0.0, 0.0])
        np.testing.assert_allclose(res.x3, 1.0)
def test_with_inequality_constraint(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 0.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def constraint(x1, x2, x3):
return x2.sum()-1
constraints = [{'type': 'ineq', 'fun': constraint}]
res = optimization.minimize(f, parameter_manager, method=self.method, constraints = constraints)
np.testing.assert_allclose(res.x1, 0.0, atol=1e-8)
np.testing.assert_allclose(res.x2, [0.5, 0.5])
| np.testing.assert_allclose(res.x3, 1.0) | numpy.testing.assert_allclose |
import pytest
from sympl import (
DataArray, set_direction_names, get_numpy_array,
restore_dimensions, get_numpy_arrays_with_properties,
restore_data_arrays_with_properties, InvalidStateError,
InvalidPropertyDictError)
import numpy as np
import unittest
"""
get_numpy_arrays_with_properties:
- returns numpy arrays in the dict
- those numpy arrays should have same dtype as original data
* even when unit conversion happens
- properly collects dimensions along a direction
- they should actually be the same numpy arrays (with memory) as original data if no conversion happens
* even when units are specified (so long as they match)
- should be the same quantities as requested by the properties
* contain all
* not contain extra
* raise exception if some are missing
- units
* converts if requested and present
* does nothing if not requested whether or not present
* raises exception if not requested or not present
* unit conversion should not modify the input array
- requires "dims" to be specified, raises exception if they aren't
- match_dims_like
* should work if matched dimensions are identical
* should raise exception if matched dimensions are not identical
* should require value to be a quantity in property_dictionary
* should require all A matches to look like B and all B matches to look like A
- should raise ValueError when explicitly specified dimension is not present
- should return a scalar array when called on a scalar DataArray
TODO: add a test case for when a wildcard dimension doesn't match anything —
the error message needs to be much more descriptive,
e.g. dims=['x', 'y', 'z'] and state=['foo', 'y', 'z'].
restore_data_arrays_with_properties:
- should return a dictionary of DataArrays
- DataArray values should be the same arrays as original data if no conversion happens
- properly restores collected dimensions
- if conversion does happen, dtype should be the same as the input
- should return same quantities as requested by the properties
* contain all
* not contain extra
* raise exception if some are missing
- units
* should be the same value as specified in output_properties dict
- requires dims_like to be specified, raises exception if it's not
* returned DataArray should have same dimensions as dims_like object
* exception should be raised if dims_like is wrong (shape is incompatible)
* should return coords like the dims_like quantity
Should add any created exceptions to the docstrings for these functions
"""
def test_get_numpy_array_3d_no_change():
    """When requested dims match the DataArray's own order, get_numpy_array
    returns a zero-copy view of the backing data."""
    data = DataArray(
        np.random.randn(2, 3, 4),
        dims=['x', 'y', 'z'],
        attrs={'units': ''},
    )
    result = get_numpy_array(data, ['x', 'y', 'z'])
    assert np.all(result == data.values)
    assert np.byte_bounds(result) == np.byte_bounds(data.values)
    assert result.base is data.values
def test_get_numpy_array_3d_reverse():
    """Reversing the dim order transposes the view without copying memory."""
    data = DataArray(
        np.random.randn(2, 3, 4),
        dims=['x', 'y', 'z'],
        attrs={'units': ''},
    )
    result = get_numpy_array(data, ['z', 'y', 'x'])
    assert result.shape == (4, 3, 2)
    assert np.all(np.transpose(result, (2, 1, 0)) == data.values)
    assert np.byte_bounds(result) == np.byte_bounds(data.values)
    assert result.base is data.values
def test_get_numpy_array_2d_reverse():
array = DataArray(
np.random.randn(2, 3),
dims=['y', 'z'],
attrs={'units': ''},
)
numpy_array = get_numpy_array(array, ['z', 'y'])
assert numpy_array.shape == (3, 2)
assert np.all( | np.transpose(numpy_array, (1, 0)) | numpy.transpose |
"""
pyart.correct.filters
=====================
Functions for creating gate filters (masks) which can be used it various
corrections routines in Py-ART.
.. autosummary::
:toctree: generated/
moment_based_gate_filter
.. autosummary::
:toctree: generated/
:template: dev_template.rst
GateFilter
"""
import numpy as np
from ..config import get_field_name
def moment_based_gate_filter(
        radar, ncp_field=None, rhv_field=None, refl_field=None,
        min_ncp=0.5, min_rhv=None, min_refl=-20., max_refl=100.0):
    """
    Create a filter which removes undesired gates based on moments.

    Creates a gate filter in which the following gates are excluded:

    * Gates where the reflectivity is outside the interval
      [min_refl, max_refl].
    * Gates where the normalized coherent power is below min_ncp.
    * Gates where the cross correlation ratio is below min_rhv. Using the
      default parameter this filtering is disabled.
    * Gates where any of the above three fields are masked or contain
      invalid values (NaNs or infs).
    * If any of these three fields do not exist in the radar that field's
      filter criteria is not applied.

    Parameters
    ----------
    radar : Radar
        Radar object from which the gate filter will be built.
    refl_field, ncp_field, rhv_field : str
        Names of the radar fields which contain the reflectivity, normalized
        coherent power (signal quality index) and cross correlation ratio
        (RhoHV) from which the gate filter will be created using the above
        criteria. A value of None for any of these parameters will use the
        default field name as defined in the Py-ART configuration file.
    min_ncp, min_rhv : float
        Minimum values for the normalized coherence power and cross
        correlation ratio. Gates below these limits as well as masked or
        invalid gates in these fields are excluded. A value of None disables
        filtering on the given field entirely, including the masked/invalid
        exclusion.
    min_refl, max_refl : float
        Minimum and maximum values for the reflectivity. Gates outside this
        interval as well as masked or invalid reflectivity gates are
        excluded. A value of None for one bound disables that bound but
        retains the other; None for both disables all reflectivity-based
        filtering.

    Returns
    -------
    gatefilter : :py:class:`GateFilter`
        A gate filter based upon the described criteria. This can be
        used as a gatefilter parameter to various functions in pyart.correct.

    """
    # Resolve any unspecified field names from the Py-ART configuration.
    if refl_field is None:
        refl_field = get_field_name('reflectivity')
    if ncp_field is None:
        ncp_field = get_field_name('normalized_coherent_power')
    if rhv_field is None:
        rhv_field = get_field_name('cross_correlation_ratio')

    gatefilter = GateFilter(radar)

    def _exclude_low(field, threshold):
        # Exclude gates below the threshold plus masked/invalid gates.
        gatefilter.exclude_below(field, threshold)
        gatefilter.exclude_masked(field)
        gatefilter.exclude_invalid(field)

    if (min_ncp is not None) and (ncp_field in radar.fields):
        _exclude_low(ncp_field, min_ncp)
    if (min_rhv is not None) and (rhv_field in radar.fields):
        _exclude_low(rhv_field, min_rhv)
    if refl_field in radar.fields:
        if min_refl is not None:
            _exclude_low(refl_field, min_refl)
        if max_refl is not None:
            gatefilter.exclude_above(refl_field, max_refl)
            gatefilter.exclude_masked(refl_field)
            gatefilter.exclude_invalid(refl_field)
    return gatefilter
class GateFilter(object):
"""
A class for building a boolean arrays for filtering gates based on
a set of condition typically based on the values in the radar fields.
These filter can be used in various algorithms and calculations within
Py-ART.
See :py:func:`pyart.correct.GateFilter.exclude_below` for method
parameter details.
Parameters
----------
radar : Radar
Radar object from which gate filter will be build.
exclude_based : bool, optional
True, the default and suggested method, will begin with all gates
included and then use the exclude methods to exclude gates based on
conditions. False will begin with all gates excluded from which
a set of gates to include should be set using the include methods.
Attributes
----------
gate_excluded : array, dtype=bool
Boolean array indicating if a gate should be excluded from a
calculation. Elements marked True indicate the corresponding gate
should be excluded. Those marked False should be included.
This is read-only attribute, any changes to the array will NOT
be reflected in gate_included and will be lost when the attribute is
accessed again.
gate_included : array, dtype=bool
Boolean array indicating if a gate should be included in a
calculation. Elements marked True indicate the corresponding gate
should be include. Those marked False should be excluded.
This is read-only attribute, any changes to the array will NOT
be reflected in gate_excluded and will be lost when the attribute is
accessed again.
Examples
--------
>>> import pyart
>>> radar = pyart.io.read('radar_file.nc')
>>> gatefilter = pyart.correct.GateFilter(radar)
>>> gatefilter.exclude_below('reflectivity', 10)
>>> gatefilter.exclude_below('normalized_coherent_power', 0.75)
"""
def __init__(self, radar, exclude_based=True):
""" initialize """
self._radar = radar
shape = (radar.nrays, radar.ngates)
if exclude_based:
# start with all gates included, exclude gates based on a set
# of rules using the exclude_ methods.
self._gate_excluded = np.zeros(shape, dtype=np.bool)
else:
# start with all gates excluded, include gates based on a set
# of rules using the include_ methods.
self._gate_excluded = np.ones(shape, dtype=np.bool)
# Implemetation is based on marking excluded gates stored in the private
# _gate_excluded attribute. The gate_included attribute can be found
# by taking the ones complement of gates_included.
def copy(self):
""" Return a copy of the gatefilter. """
a = GateFilter(self._radar)
a._gate_excluded = self._gate_excluded.copy()
return a
@property
def gate_included(self):
return ~self._gate_excluded.copy()
@property
def gate_excluded(self):
return self._gate_excluded.copy()
def _get_fdata(self, field):
""" Check that the field exists and retrieve field data. """
self._radar.check_field_exists(field)
return self._radar.fields[field]['data']
def _merge(self, marked, op, exclude_masked):
""" Merge an array of marked gates with the exclude array. """
# exclude masked elements in marked by replacing them with the value
# of the exclude_masked flag. This does nothing if marked is a
# non-masked array.
if exclude_masked not in [True, False]:
raise ValueError("exclude_masked must be 'True' or 'False'")
marked = | np.ma.filled(marked, exclude_masked) | numpy.ma.filled |
# nuScenes dev-kit.
# Code written by <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
#from __future__ import annotations
import torch
import numpy as np
from pyquaternion import Quaternion
class Box:
""" Simple data class representing a 3d box including, label, score and velocity. """
def __init__(self, center, size, orientation, label=np.nan, score=np.nan, velocity=(np.nan, np.nan, np.nan),
name=None):
"""
:param center: [<float>: 3]. Center of box given as x, y, z.
:param size: [<float>: 3]. Size of box in width, length, height.
:param orientation: <Quaternion>. Box orientation.
:param label: <int>. Integer label, optional.
:param score: <float>. Classification score, optional.
:param velocity: [<float>: 3]. Box velocity in x, y, z direction.
:param name: <str>. Box name, optional. Can be used e.g. for denote category name.
"""
assert not np.any(np.isnan(center))
assert not np.any(np.isnan(size))
assert len(center) == 3
assert len(size) == 3
# assert type(orientation) == Quaternion
self.center = np.array(center)
self.wlh = np.array(size)
self.orientation = orientation
self.label = int(label) if not np.isnan(label) else label
self.score = float(score) if not np.isnan(score) else score
self.velocity = np.array(velocity)
self.name = name
def __eq__(self, other):
center = np.allclose(self.center, other.center)
wlh = np.allclose(self.wlh, other.wlh)
orientation = np.allclose(self.orientation.elements, other.orientation.elements)
label = (self.label == other.label) or (np.isnan(self.label) and np.isnan(other.label))
score = (self.score == other.score) or (np.isnan(self.score) and np.isnan(other.score))
vel = (np.allclose(self.velocity, other.velocity) or
(np.all(np.isnan(self.velocity)) and np.all(np.isnan(other.velocity))))
return center and wlh and orientation and label and score and vel
    def __repr__(self):
        """Human-readable summary: label, score, center, size, rotation
        axis/angle (degrees and radians), velocity and name."""
        repr_str = 'label: {}, score: {:.2f}, xyz: [{:.2f}, {:.2f}, {:.2f}], wlh: [{:.2f}, {:.2f}, {:.2f}], ' \
                   'rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, ' \
                   'vel: {:.2f}, {:.2f}, {:.2f}, name: {}'
        return repr_str.format(self.label, self.score, self.center[0], self.center[1], self.center[2], self.wlh[0],
                               self.wlh[1], self.wlh[2], self.orientation.axis[0], self.orientation.axis[1],
                               self.orientation.axis[2], self.orientation.degrees, self.orientation.radians,
                               self.velocity[0], self.velocity[1], self.velocity[2], self.name)
def encode(self):
"""
Encodes the box instance to a JSON-friendly vector representation.
:return: [<float>: 16]. List of floats encoding the box.
"""
return self.center.tolist() + self.wlh.tolist() + self.orientation.elements.tolist() + [self.label] + [self.score] + self.velocity.tolist() + [self.name]
    @classmethod
    def decode(cls, data):
        """
        Instantiates a Box instance from encoded vector representation.
        :param data: [<float>: 16]. Output from encode.
        :return: <Box>.
        """
        # Inverse of encode(): center(3) + wlh(3) + quaternion(4) + label +
        # score + velocity(3) + name.
        return Box(data[0:3], data[3:6], Quaternion(data[6:10]), label=data[10], score=data[11], velocity=data[12:15],
                   name=data[15])
    @property
    def rotation_matrix(self):
        """
        Return a rotation matrix derived from the box orientation quaternion.
        :return: <np.float: (3, 3)>.
        """
        return self.orientation.rotation_matrix
    def translate(self, x):
        """
        Applies a translation to the box center (in place).
        :param x: <np.float: 3, 1>. Translation in x, y, z direction.
        :return: <None>.
        """
        # In-place add; assumes self.center has a float dtype so adding a
        # float translation does not raise — TODO confirm for int inputs.
        self.center += x
    def rotate(self, quaternion):
        """
        Rotates the box in place: center and velocity are rotated by the
        quaternion's rotation matrix and the orientation is composed.
        :param quaternion: <Quaternion>. Rotation to apply.
        :return: <None>.
        """
        self.center = np.dot(quaternion.rotation_matrix, self.center)
        # Left-multiplying applies the new rotation after the existing one.
        self.orientation = quaternion * self.orientation
        self.velocity = np.dot(quaternion.rotation_matrix, self.velocity)
    def transform(self, transf_matrix):
        """Apply a 4x4 homogeneous transform to the box pose and velocity
        (in place)."""
        # NOTE(review): transf_matrix[0:3, 0:4].T is (4, 3); dotting it with
        # the 3-vector center yields a 4-vector that is then dehomogenized.
        # This is an unusual formulation (the conventional form is
        # R @ center + t) — verify against callers that transf_matrix has the
        # expected layout before relying on this.
        transformed = np.dot(transf_matrix[0:3,0:4].T, self.center)
        self.center = transformed[0:3]/transformed[3]
        self.orientation = self.orientation* Quaternion(matrix = transf_matrix[0:3,0:3])
        # Velocity is direction-like: rotate only, no translation.
        self.velocity = np.dot(transf_matrix[0:3,0:3], self.velocity)
def corners(self, wlh_factor=1.0):
"""
Returns the bounding box corners.
:param wlh_factor: <float>. Multiply w, l, h by a factor to inflate or deflate the box.
:return: <np.float: 3, 8>. First four corners are the ones facing forward.
The last four are the ones facing backwards.
"""
w, l, h = self.wlh * wlh_factor
# 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
corners = | np.vstack((x_corners, y_corners, z_corners)) | numpy.vstack |
########################
# Create a 4-panel plot of photo-z quality for Paper 2
########################
import numpy as np, pylab, ldac, os
from readtxtfile import readtxtfile
########################
def makeHexbin(specz, photz, xlabel = 'Reference Redshift', ylabel = 'Photo-$z$', gridsize = 100, bins = None):
    """Create a hexbin density figure of photo-z versus reference redshift,
    with the 1:1 identity line overplotted, and return the figure."""
    fig = pylab.figure()
    axes = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])
    axes.hexbin(specz, photz, gridsize=gridsize,
                extent=[0, 1.5, 0, 1.5], cmap=pylab.cm.binary, bins=bins)
    # Red identity line: perfect photo-z estimates fall on it.
    axes.plot([0, 1.5], [0, 1.5], 'r-', linewidth=1.25)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    return fig
##############################
def cosmos30plot(data = None):
    """Plot BVRIz photo-z's against the COSMOS-30 photo-z's as a hexbin
    density figure and save it to publication/cosmos30comp.pdf.

    ``data`` is an optional cache dict; catalogs missing from it are loaded
    from disk and stored so repeated calls skip the slow file reads.
    Returns (figure, data).
    """
    if data is None:
        data = {}
    # Load each catalog only once; subsequent calls reuse the cache.
    if 'bpz' not in data:
        data['bpz'] = ldac.openObjectFile('/u/ki/dapple/subaru/COSMOS_PHOTOZ/PHOTOMETRY_W-C-IC_BVRIZ/COSMOS_PHOTOZ.APER.1.CWWSB_capak.list.all.bpz.tab', 'STDTAB')
    if 'cosmos' not in data:
        data['cosmos'] = ldac.openObjectFile('/u/ki/dapple/nfs12/cosmos/cosmos.cat')
    if 'galaxies' not in data:
        data['galaxies'] = ldac.openObjectFile('/u/ki/dapple/nfs12/cosmos/simulations/publication/highsn/BVRIZ/bpz.cat', 'STDTAB')
    bpz = data['bpz']
    cosmos = data['cosmos']
    galaxies = data['galaxies']
    # Keep only galaxies with a confident BPZ fit (odds > 0.5).
    filter = galaxies['BPZ_ODDS'] > 0.5
    galaxies = galaxies.filter(filter)
    # Match BPZ results to the galaxy sample, then to COSMOS-30 by object id.
    mbpz = bpz.matchById(galaxies)
    mcosmos = cosmos.matchById(mbpz, 'SeqNr', 'id')
    fig = makeHexbin(mcosmos['zp_best'], mbpz['BPZ_Z_B'], xlabel=r'COSMOS-30 Photo-$z$', ylabel=r'{\it B}$_{\rm J}${\it V}$_{\rm J}${\it r}$^{+}${\it i}$^{+}${\it z}$^{+}$ Photo-$z$',
                     gridsize = 50, bins=None)
    # fig.axes[0].text(1.1, 0.05, '\emph{Log Color Scale}')
    fig.savefig('publication/cosmos30comp.pdf')
    return fig, data
###############################
def zcosmosplot(data = None):
if data is None:
data = {}
if 'bpz' not in data:
data['bpz'] = ldac.openObjectFile('/u/ki/dapple/subaru/COSMOS_PHOTOZ/PHOTOMETRY_W-C-IC_BVRIZ/COSMOS_PHOTOZ.APER.1.CWWSB_capak.list.all.bpz.tab', 'STDTAB')
if 'galaxies' not in data:
data['galaxies'] = ldac.openObjectFile('/u/ki/dapple/nfs12/cosmos/simulations/publication/highsn/BVRIZ/bpz.cat', 'STDTAB')
if 'zcat' not in data:
data['zcat'] = ldac.openObjectFile('/u/ki/dapple/nfs12/cosmos/zcosmos.matched.cat')
if 'refbpz' not in data:
data['refbpz'] = ldac.openObjectFile('/u/ki/dapple/subaru/COSMOS_PHOTOZ/PHOTOMETRY_W-C-IC_BVRIZ/bpz.zcosmos.matched.cat', 'STDTAB')
bpz = data['bpz']
galaxies = data['galaxies']
zcat = data['zcat']
refbpz = data['refbpz']
filter = galaxies['BPZ_ODDS'] > 0.5
galaxies = galaxies.filter(filter)
mbpz = bpz.matchById(galaxies)
cat1 = mbpz
cat2 = refbpz
cat1id = 'SeqNr'
cat2id = 'SeqNr'
cat1order = {}
for i, x in enumerate(cat1[cat1id]):
cat1order[x] = i
cat1KeepOrder = []
cat2Keep = []
for x in cat2[cat2id]:
if x in cat1order:
cat1KeepOrder.append(cat1order[x])
cat2Keep.append(True)
else:
cat2Keep.append(False)
cat1keep = np.array(cat1KeepOrder)
cat2keep = | np.array(cat2Keep) | numpy.array |
"""Code for setting up, and running, and collecting data from PV-DER simulations."""
from __future__ import division
import numpy as np
import math
import cmath
import time
import pdb
import six
from pvder.utility_classes import Utilities
from pvder.grid_components import Grid
from pvder.simulation_utilities import SimulationUtilities
#from pvder.simulation_utilities_experimental import SimulationUtilitiesExperimental
from pvder import utility_functions
from pvder import defaults,templates
from pvder.logutil import LogUtil
class DynamicSimulation(Grid,SimulationUtilities,Utilities):
""" Utility class for running simulations."""
count = 0
tStart = 0.0
tInc = defaults.DEFAULT_DELTA_T
DEBUG_SOLVER = False
DEBUG_SIMULATION = False
DEBUG_CONTROLLERS = False
DEBUG_VOLTAGES = False
DEBUG_CURRENTS = False
DEBUG_POWER = False
DEBUG_PLL = False
jac_list = ['SolarPVDERThreePhase','SolarPVDERSinglePhase','SolarPVDERThreePhaseBalanced']
    def __init__(self,PV_model,events,gridModel = None,tStop = 0.5,
                 LOOP_MODE = False,COLLECT_SOLUTION = True,jacFlag = False,
                 verbosity ='INFO',solverType ='odeint',identifier = None):
        """Creates an instance of `GridSimulation`.

        Args:
            PV_model: An instance of `SolarPV_DER`.
            events: An instance of `SimulationEvents`.
            gridModel: An instance of `GridModel` (only needs to be supplied
                in stand alone simulation).
            tStop: A scalar specifying the end time for simulation.
            LOOP_MODE: A boolean specifying whether simulation is run in loop.
            COLLECT_SOLUTION: A boolean specifying whether the solution
                trajectory is stored.
            jacFlag: A boolean specifying whether the analytical Jacobian
                should be used (only available for some DER model types).
            verbosity: Logging verbosity level.
                NOTE(review): currently unused in this method — confirm it is
                consumed elsewhere.
            solverType: Name of the ODE solver passed to initialize_solver.
            identifier: Optional identifier used when naming this instance.

        Raises:
            ValueError: If a stand-alone PV model is given without a grid model.
        """
        try:
            DynamicSimulation.count = DynamicSimulation.count + 1 #Increment count to keep track of number of simulation instances
            self.name_instance(identifier) #Generate a name for the instance
            self.tStop = tStop
            # Time vector is derived from the class-level tStart/tInc defaults.
            self.t = self.t_calc()
            self.PV_model = PV_model
            self.DER_model_type = type(self.PV_model).__name__
            self.simulation_events = events
            self.simulation_events.del_t_event = self.tInc
            self.initialize_solver(solver_type=solverType)
            self.SOLVER_CONVERGENCE = False
            self.convergence_failure_list =[]
            self.LOOP_MODE = LOOP_MODE
            self.COLLECT_SOLUTION = COLLECT_SOLUTION
            self.jacFlag = jacFlag
            # Fails fast if a Jacobian was requested for an unsupported model.
            self.check_jac_availability()
            # Stand-alone simulations must supply their own grid model.
            if self.PV_model.standAlone and gridModel is not None:
                self.grid_model = gridModel
            elif self.PV_model.standAlone and gridModel is None:
                raise ValueError('`Grid` instance need to provided in stand alone mode for creating `GridSimulation` instance!')
            #Remove existing simulation events
            #self.simulation_events.remove_solar_event(3.0)
            #self.simulation_events.remove_load_event(4.0)
            #self.simulation_events.remove_grid_event(5.0)
            self.solution_time = None #Always reset solution time to None
            if self.LOOP_MODE:
                self.reset_stored_trajectories()
            self.initialize_y0_t()
        except:
            LogUtil.exception_handler()
    @property
    def y0(self):
        """ Combine all initial conditions from solution."""
        # Builds the ODE initial-state vector from the most recent stored
        # trajectory sample of each state: phase currents (i*), controller
        # states (x*), control inputs (u*), DC-link voltage (Vdc), the
        # DC/P/Q control integrators, the PLL state (xPLL) and the angle
        # (wte). The ordering per branch must match the state layout the
        # corresponding DER model class integrates.
        # NOTE(review): if the model type matches no branch, `y0` is never
        # bound and the `return` raises; the bare except then swallows it.
        try:
            if type(self.PV_model).__name__ == 'SolarPVDERThreePhase':
                y0 = [self.iaR_t[-1], self.iaI_t[-1], self.xaR_t[-1], self.xaI_t[-1], self.uaR_t[-1],self.uaI_t[-1],
                      self.ibR_t[-1], self.ibI_t[-1], self.xbR_t[-1], self.xbI_t[-1], self.ubR_t[-1],self.ubI_t[-1],
                      self.icR_t[-1], self.icI_t[-1], self.xcR_t[-1], self.xcI_t[-1], self.ucR_t[-1],self.ucI_t[-1],
                      self.Vdc_t[-1],
                      self.xDC_t[-1],self.xQ_t[-1],
                      self.xPLL_t[-1],self.wte_t[-1]]
            elif type(self.PV_model).__name__ == 'SolarPVDERSinglePhase':
                # Single phase: phase-a states only.
                y0 =[self.iaR_t[-1], self.iaI_t[-1], self.xaR_t[-1], self.xaI_t[-1], self.uaR_t[-1],self.uaI_t[-1],
                     self.Vdc_t[-1],
                     self.xDC_t[-1],self.xQ_t[-1],
                     self.xPLL_t[-1],self.wte_t[-1]]
            elif type(self.PV_model).__name__ == 'SolarPVDERSinglePhaseConstantVdc':
                # Constant-Vdc models track xP instead of Vdc/xDC.
                y0 =[self.iaR_t[-1], self.iaI_t[-1], self.xaR_t[-1], self.xaI_t[-1], self.uaR_t[-1],self.uaI_t[-1],
                     self.xP_t[-1],self.xQ_t[-1],
                     self.xPLL_t[-1],self.wte_t[-1]]
            elif type(self.PV_model).__name__ == 'SolarPVDERThreePhaseConstantVdc':
                y0 =[self.iaR_t[-1], self.iaI_t[-1], self.xaR_t[-1], self.xaI_t[-1], self.uaR_t[-1],self.uaI_t[-1],
                     self.ibR_t[-1], self.ibI_t[-1], self.xbR_t[-1], self.xbI_t[-1], self.ubR_t[-1],self.ubI_t[-1],
                     self.icR_t[-1], self.icI_t[-1], self.xcR_t[-1], self.xcI_t[-1], self.ucR_t[-1],self.ucI_t[-1],
                     self.xP_t[-1],self.xQ_t[-1],
                     self.xPLL_t[-1],self.wte_t[-1]]
            elif self.DER_model_type == 'SolarPVDERThreePhaseBalanced':
                # Balanced three-phase: only phase-a states are integrated.
                y0 =[self.iaR_t[-1], self.iaI_t[-1], self.xaR_t[-1], self.xaI_t[-1], self.uaR_t[-1],self.uaI_t[-1],
                     self.Vdc_t[-1],
                     self.xDC_t[-1],self.xQ_t[-1],
                     self.xPLL_t[-1],self.wte_t[-1]]
            elif self.DER_model_type == 'SolarPVDERThreePhaseNumba':
                y0 = [self.iaR_t[-1], self.iaI_t[-1], self.xaR_t[-1], self.xaI_t[-1], self.uaR_t[-1],self.uaI_t[-1],
                      self.ibR_t[-1], self.ibI_t[-1], self.xbR_t[-1], self.xbI_t[-1], self.ubR_t[-1],self.ubI_t[-1],
                      self.icR_t[-1], self.icI_t[-1], self.xcR_t[-1], self.xcI_t[-1], self.ucR_t[-1],self.ucI_t[-1],
                      self.Vdc_t[-1],
                      self.xDC_t[-1],self.xQ_t[-1],
                      self.xPLL_t[-1],self.wte_t[-1]]
            return y0
        except:
            LogUtil.exception_handler()
    #@property
    def t_calc(self):
        """Vector of time steps for simulation"""
        # arange with stop = tStop + tInc makes the vector inclusive of tStop.
        try:
            #if (self.tStop - self.tStart) <= self.tInc:
            # self.tStop = self.tStart + 1e-6 #+ self.tInc
            return np.arange(self.tStart, self.tStop + self.tInc, self.tInc)
        except:
            LogUtil.exception_handler()
    def check_jac_availability(self):
        """Check if Jacobian matrix is available."""
        # Analytical Jacobians only exist for the model types listed in
        # jac_list; requesting jacFlag for any other DER model is an error.
        try:
            if self.jacFlag:
                if not self.DER_model_type in self.jac_list:
                    raise ValueError('{}:Jacobian matrix is not available for DER model:{}'.format(self.name,self.DER_model_type))
        except:
            LogUtil.exception_handler()
    def initialize_y0_t(self):
        """Initialize y0_t."""
        # Seeds the per-state trajectory arrays (*_t) as length-1 numpy
        # arrays from the DER model's initial condition vector PV_model.y0.
        # The index layout per branch must match the state ordering used by
        # the corresponding DER model class.
        try:
            # Phase-a states occupy indices 0-5 in every model type.
            self.iaR_t = np.array([self.PV_model.y0[0]])
            self.iaI_t = np.array([self.PV_model.y0[1]])
            self.xaR_t = np.array([self.PV_model.y0[2]])
            self.xaI_t = np.array([self.PV_model.y0[3]])
            self.uaR_t = np.array([self.PV_model.y0[4]])
            self.uaI_t = np.array([self.PV_model.y0[5]])
            if type(self.PV_model).__name__ == 'SolarPVDERSinglePhase':
                self.Vdc_t = np.array([self.PV_model.y0[6]]) #DC link voltage variable
                self.xDC_t = np.array([self.PV_model.y0[7]]) #DC link voltage control variable
                self.xQ_t = np.array([self.PV_model.y0[8]]) #Reactive power control variable
                self.xPLL_t = np.array([self.PV_model.y0[9]]) #PLL variables
                self.wte_t = np.array([self.PV_model.y0[10]]) #Frequency integration to get angle
            elif (self.DER_model_type == 'SolarPVDERThreePhase') or (self.DER_model_type == 'SolarPVDERThreePhaseNumba'):
                # Phases b and c occupy indices 6-17.
                self.ibR_t = np.array([self.PV_model.y0[6]])
                self.ibI_t = np.array([self.PV_model.y0[7]])
                self.xbR_t = np.array([self.PV_model.y0[8]])
                self.xbI_t = np.array([self.PV_model.y0[9]])
                self.ubR_t = np.array([self.PV_model.y0[10]])
                self.ubI_t = np.array([self.PV_model.y0[11]])
                self.icR_t = np.array([self.PV_model.y0[12]])
                self.icI_t = np.array([self.PV_model.y0[13]])
                self.xcR_t = np.array([self.PV_model.y0[14]])
                self.xcI_t = np.array([self.PV_model.y0[15]])
                self.ucR_t = np.array([self.PV_model.y0[16]])
                self.ucI_t = np.array([self.PV_model.y0[17]])
                self.Vdc_t = np.array([self.PV_model.y0[18]])
                self.xDC_t = np.array([self.PV_model.y0[19]])
                self.xQ_t = np.array([self.PV_model.y0[20]])
                self.xPLL_t = np.array([self.PV_model.y0[21]])
                self.wte_t = np.array([self.PV_model.y0[22]])
            elif type(self.PV_model).__name__ == 'SolarPVDERThreePhaseConstantVdc':
                self.ibR_t = np.array([self.PV_model.y0[6]])
                self.ibI_t = np.array([self.PV_model.y0[7]])
                self.xbR_t = np.array([self.PV_model.y0[8]])
                self.xbI_t = np.array([self.PV_model.y0[9]])
                self.ubR_t = np.array([self.PV_model.y0[10]])
                self.ubI_t = np.array([self.PV_model.y0[11]])
                self.icR_t = np.array([self.PV_model.y0[12]])
                self.icI_t = np.array([self.PV_model.y0[13]])
                self.xcR_t = np.array([self.PV_model.y0[14]])
                self.xcI_t = np.array([self.PV_model.y0[15]])
                self.ucR_t = np.array([self.PV_model.y0[16]])
                self.ucI_t = np.array([self.PV_model.y0[17]])
                self.Vdc_t = np.array([self.PV_model.Vdc]) #Voltage is constant
                self.xP_t = np.array([self.PV_model.y0[18]]) #Active power control variable
                self.xQ_t = np.array([self.PV_model.y0[19]]) #Reactive power control variable
                self.xPLL_t = np.array([self.PV_model.y0[20]]) #PLL variables
                self.wte_t = np.array([self.PV_model.y0[21]]) #Frequency integration to get angle
            elif type(self.PV_model).__name__ == 'SolarPVDERThreePhaseBalanced':
                # Balanced model only integrates phase a; phases b and c are
                # reconstructed via utility_functions.Ub_calc / Uc_calc
                # (presumably +/-120 degree phase shifts — confirm there).
                ia_t = self.iaR_t+self.iaI_t*1j
                xa_t = self.xaR_t+self.xaI_t*1j
                ua_t = self.uaR_t+self.uaI_t*1j
                ib_t = utility_functions.Ub_calc(ia_t)
                xb_t = utility_functions.Ub_calc(xa_t)
                ub_t = utility_functions.Ub_calc(ua_t)
                ic_t = utility_functions.Uc_calc(ia_t)
                xc_t = utility_functions.Uc_calc(xa_t)
                uc_t = utility_functions.Uc_calc(ua_t)
                self.ibR_t = ib_t.real
                self.ibI_t = ib_t.imag
                self.xbR_t = xb_t.real
                self.xbI_t = xb_t.imag
                self.ubR_t = ub_t.real
                self.ubI_t = ub_t.imag
                self.icR_t = ic_t.real
                self.icI_t = ic_t.imag
                self.xcR_t = xc_t.real
                self.xcI_t = xc_t.imag
                self.ucR_t = uc_t.real
                self.ucI_t = uc_t.imag
                self.Vdc_t = np.array([self.PV_model.y0[6]]) #DC link voltage variable
                self.xDC_t = np.array([self.PV_model.y0[7]]) #DC link voltage control variable
                self.xQ_t = np.array([self.PV_model.y0[8]]) #Reactive power control variable
                self.xPLL_t = np.array([self.PV_model.y0[9]]) #PLL variables
                self.wte_t = np.array([self.PV_model.y0[10]]) #Frequency integration to get angle
            elif type(self.PV_model).__name__ == 'SolarPVDERSinglePhaseConstantVdc':
                self.Vdc_t = np.array([self.PV_model.Vdc]) #Voltage is constant
                self.xP_t = np.array([self.PV_model.y0[6]]) #Active power control variable
                self.xQ_t = np.array([self.PV_model.y0[7]]) #Reactive power control variable
                self.xPLL_t = np.array([self.PV_model.y0[8]]) #PLL variables
                self.wte_t = np.array([self.PV_model.y0[9]]) #Frequency integration to get angle
        except:
            LogUtil.exception_handler()
def reset_stored_trajectories(self):
"""Reset for plotting."""
try:
self._t_t = np.array(0.0)
self.Vdc_t = self._Vdc_t = np.array(self.PV_model.Vdc)
self.ia_t = self._ia_t = np.array(self.PV_model.ia)
self.ma_t = self._ma_t = np.array(self.PV_model.ma)
self.vta_t = self._vta_t = np.array(self.PV_model.vta)
self.va_t = self._va_t = np.array(self.PV_model.va)
self.ma_absolute_t = self._ma_absolute_t = np.array(abs(self.PV_model.ma))
self.Varms_t = self._Varms_t = np.array(abs(self.PV_model.va)/math.sqrt(2))
if type(self.PV_model).__name__ in templates.three_phase_models:
self.mb_absolute_t = self._mb_absolute_t = np.array(abs(self.PV_model.mb))
self.mc_absolute_t = self._mc_absolute_t = np.array(abs(self.PV_model.mc))
self.Vbrms_t = self._Vbrms_t = np.array(abs(self.PV_model.vb)/math.sqrt(2))
self.Vcrms_t = self._Vcrms_t = np.array(abs(self.PV_model.vc)/math.sqrt(2))
self.ib_t = self._ib_t = np.array(self.PV_model.ib)
self.mb_t = self._mb_t = np.array(self.PV_model.mb)
self.vtb_t = self._vtb_t = np.array(self.PV_model.vtb)
self.vb_t = self._vb_t = np.array(self.PV_model.vb)
self.ic_t = self._ic_t = np.array(self.PV_model.ic)
self.mc_t = self._mc_t = np.array(self.PV_model.mc)
self.vtc_t = self._vtc_t = np.array(self.PV_model.vtc)
self.vc_t = self._vc_t = | np.array(self.PV_model.vc) | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# from __future__ import absolute_import, division, print_function
import locale
import matplotlib as mpl
from matplotlib import docstring
import numpy as np
import random
from matplotlib.projections.polar import PolarAxes
from numpy.lib.twodim_base import histogram2d
import matplotlib.pyplot as plt
ZBASE = -1000 # The starting zorder for all drawing, negative to have the grid on
VAR_DEFAULT = "speed"
DIR_DEFAULT = "direction"
FIGSIZE_DEFAULT = (8, 8)
DPI_DEFAULT = 80
CALM_CIRCLE_COLOR = "red"
CALM_CIRCLE_ALPHA = 0.4
class WindAxesFactory(object):
    """
    Factory for constructing WindroseAxes or WindAxes instances by name.
    """

    @staticmethod
    def create(typ, ax=None, *args, **kwargs):
        """
        Create (or pass through) an axes of the requested type.

        Parameters
        ----------
        typ : string, 'windroseaxes' or 'windaxes'
            Type of axes to create
            * windroseaxes : a WindroseAxes axe
            * windaxe : a WindAxes axe
        ax : matplotlib.Axes, optional
            An existing axes; returned unchanged when it already has the
            requested type, otherwise a new axes is built via ``from_ax``.
        """
        typ = typ.lower()
        d = {"windroseaxes": WindroseAxes, "windaxes": WindAxes}
        # Guard clause: unknown type names are rejected immediately.
        if typ not in d:
            raise NotImplementedError("typ=%r but it might be in %s" % (typ, d.keys()))
        cls = d[typ]
        if isinstance(ax, cls):
            return ax
        return cls.from_ax(ax, *args, **kwargs)
class WindroseAxes(PolarAxes):
"""
Create a windrose axes
"""
name = "windrose"
    def __init__(self, *args, **kwargs):
        """
        See Axes base class for args and kwargs documentation
        """
        # Uncomment to have the possibility to change the resolution directly
        # when the instance is created
        # self.RESOLUTION = kwargs.pop('resolution', 100)
        # Pop windrose-specific options before delegating the rest to
        # PolarAxes (which would reject unknown keyword arguments).
        self.rmax = kwargs.pop("rmax", None)
        # Sector labels applied to the 22.5-degree theta grid set up in cla().
        self.theta_labels = kwargs.pop("theta_labels", ["E", "N-E", "N", "N-W", "W", "S-W", "S", "S-E"])
        PolarAxes.__init__(self, *args, **kwargs)
        self.set_aspect("equal", adjustable="box", anchor="C")
        # Angle (degrees) at which the radial tick labels are drawn.
        self.radii_angle = 67.5
        self.cla()
    @staticmethod
    def from_ax(ax=None, fig=None, rmax=None, theta_labels=None, rect=None, *args, **kwargs):
        """
        Return a WindroseAxes object for the figure `fig`.

        When ``ax`` is None a new WindroseAxes is created (building a default
        figure and axes rectangle as needed); otherwise ``ax`` is returned
        unchanged.
        """
        if ax is None:
            if fig is None:
                # Default standalone figure: white, module-default size/dpi.
                fig = plt.figure(
                    figsize=FIGSIZE_DEFAULT,
                    dpi=DPI_DEFAULT,
                    facecolor="w",
                    edgecolor="w",
                )
            if rect is None:
                rect = [0.1, 0.1, 0.8, 0.8]
            ax = WindroseAxes(fig, rect, rmax=rmax, theta_labels=theta_labels, *args, **kwargs)
            fig.add_axes(ax)
            return ax
        else:
            return ax
    def cla(self):
        """
        Clear the current axes
        """
        PolarAxes.cla(self)
        # Restore the theta grid (22.5-degree steps) after the base reset.
        self.theta_angles = np.arange(0, 360, 22.5)
        self.set_thetagrids(angles=self.theta_angles, labels=self.theta_labels)
        # Per-plot bookkeeping: wind directions, speed bins, frequency table.
        self._info = {"dir": list(), "bins": list(), "table": list()}
        self.patches_list = list()
        self.calm_count = None
def _colors(self, cmap, n):
"""
Returns a list of n colors based on the colormap cmap
"""
return [cmap(i) for i in np.linspace(0.0, 1.0, n)]
    def set_radii_angle(self, **kwargs):
        """
        Set the radii labels angle
        """
        kwargs.pop("labels", None)
        angle = kwargs.pop("angle", None)
        if angle is None:
            angle = self.radii_angle
        self.radii_angle = angle
        # Five radial gridlines between 0 and rmax; integer labels when rmax
        # divides evenly by N, one decimal place otherwise.
        N = 5
        rmax = self.get_rmax()
        radii = np.linspace(0, rmax, N + 1)
        if rmax % N == 0:
            fmt = "%d"
        else:
            fmt = "%.1f"
        radii_labels = [fmt % r for r in radii]
        # radii_labels[0] = "" # Removing label 0
        # The innermost (zero-radius) gridline and its label are skipped.
        self.set_rgrids(
            radii=radii[1:], labels=radii_labels[1:], angle=self.radii_angle, **kwargs
        )
    def _update(self):
        # Default the radial limit to the largest stacked sector total so the
        # tallest sector fits, then extend it by the calm-circle radius.
        if not self.rmax:
            self.rmax = np.max(np.sum(self._info["table"], axis=0))
        calm_count = self.calm_count or 0
        self.set_rmax(rmax=self.rmax + calm_count)
        self.set_radii_angle(angle=self.radii_angle)
    def legend(self, loc="lower left", decimal_places=1, units=None, **kwargs):
        """
        Sets the legend location and its properties.

        Parameters
        ----------
        loc : int, string or pair of floats, default: 'lower left'
            see :obj:`matplotlib.pyplot.legend`.
        decimal_places : int, default 1
            The decimal places of the formatted legend
        units : str, default None
            Unit string appended to each legend label.

        Other Parameters
        ----------------
        isaxes : boolean, default True
            whether this is an axes legend
        prop : FontProperties(size='smaller')
            the font property
        borderpad : float
            the fractional whitespace inside the legend border
        shadow : boolean
            if True, draw a shadow behind legend
        labelspacing : float, 0.005
            the vertical space between the legend entries
        handlelength : float, 0.05
            the length of the legend lines
        handletextsep : float, 0.02
            the space between the legend line and legend text
        borderaxespad : float, 0.02
            the border between the axes and legend edge
        kwarg
            Every other kwarg argument supported by
            :obj:`matplotlib.pyplot.legend`
        """
        def get_handles():
            # One colored rectangle per drawn patch/line so legend swatches
            # match the plot colors.
            handles = list()
            for p in self.patches_list:
                if isinstance(p, mpl.patches.Polygon) or isinstance(
                    p, mpl.patches.Rectangle
                ):
                    color = p.get_facecolor()
                elif isinstance(p, mpl.lines.Line2D):
                    color = p.get_color()
                else:
                    raise AttributeError("Can't handle patches")
                handles.append(
                    mpl.patches.Rectangle(
                        (0, 0), 0.2, 0.2, facecolor=color, edgecolor="black"
                    )
                )
            return handles

        def get_labels(decimal_places=1, units=None):
            # Format each bin edge pair as a half-open interval. The French
            # locale uses "[a : b[" interval notation instead of "[a : b)".
            _decimal_places = str(decimal_places)
            fmt = "[%." + _decimal_places + "f " + ": %0." + _decimal_places + "f"
            labels = np.copy(self._info["bins"])
            if locale.getlocale()[0] in ["fr_FR"]:
                fmt += "["
            else:
                fmt += ")"
            if units:
                fmt += ' ' + units
            labels = [fmt % (labels[i], labels[i + 1]) for i in range(len(labels) - 1)]
            return labels

        # Caller-supplied labels/handles are ignored; they are derived from
        # the plotted data instead.
        kwargs.pop("labels", None)
        kwargs.pop("handles", None)
        # decimal_places = kwargs.pop('decimal_places', 1)
        handles = get_handles()
        labels = get_labels(decimal_places, units)
        self.legend_ = mpl.legend.Legend(self, handles, labels, loc, **kwargs)
        return self.legend_
def set_legend(self, **pyplot_arguments):
if "borderaxespad" not in pyplot_arguments:
pyplot_arguments["borderaxespad"] = -0.10
legend = self.legend(**pyplot_arguments)
plt.setp(legend.get_texts(), fontsize=8)
return legend
def _init_plot(self, direction, var, **kwargs):
    """
    Internal method used by all plotting commands.

    Prepares the shared plotting data (bins, sectors, colors, angles)
    and fills ``self._info`` with the direction/variable histogram.

    Parameters
    ----------
    direction : 1D array,
        directions the wind blows from, North centred
    var : 1D array,
        values of the variable to compute. Typically the wind speeds

    Other Parameters
    ----------------
    normed : boolean, default False
    blowto : boolean, default False
    colors : str or list of str, default None
        The colors of the plot.
    cmap : color map, default `jet`
        A :obj:`matplotlib.cm` colormap for the plot.
        Warning! It overrides `colors`.
    weibull_factors : sequence of (shape, scale) pairs, optional
        Mutually exclusive with `mean_values`; requires `frequency`.
    mean_values : sequence of floats, optional
        Mutually exclusive with `weibull_factors`; requires `frequency`.
    frequency : sequence of floats
        Per-direction occurrence frequencies for the statistical inputs.
    calm_limit : float, optional
        Speeds at or below this limit are counted as "calm" and removed.
    kwarg
        Any argument accepted by :obj:`matplotlib.pyplot.plot`.

    Returns
    -------
    (bins, nbins, nsector, colors, angles, kwargs)
        The remaining kwargs are forwarded to the matplotlib call.
    """
    # If statistical (weibull/mean) factors are given, synthesize a raw
    # sample of speeds and directions from them, overwriting direction/var.
    if "weibull_factors" in kwargs or "mean_values" in kwargs:
        if "weibull_factors" in kwargs and "mean_values" in kwargs:
            raise TypeError("cannot specify both weibull_factors and mean_values")
        statistic_type = "unset"
        if "weibull_factors" in kwargs:
            statistic_type = "weibull"
            val = kwargs.pop("weibull_factors")
        elif "mean_values" in kwargs:
            statistic_type = "mean"
            val = kwargs.pop("mean_values")
        if val:
            if "frequency" not in kwargs:
                raise TypeError(
                    "specify 'frequency' argument for statistical input"
                )
            windFrequencies = kwargs.pop("frequency")
            # all three per-direction sequences must be aligned
            if len(windFrequencies) != len(direction):
                raise TypeError("len(frequency) != len(direction)")
            if len(direction) != len(var):
                # BUGFIX: previously raised the same message as the
                # frequency/direction mismatch, hiding the real cause.
                raise TypeError("len(direction) != len(var)")
            windSpeeds = []
            windDirections = []
            for dbin in range(len(direction)):
                # draw ~frequency*10000 synthetic samples for this bin
                for _ in range(int(windFrequencies[dbin] * 10000)):
                    if statistic_type == "weibull":
                        windSpeeds.append(
                            random.weibullvariate(var[dbin][0], var[dbin][1])
                        )
                    elif statistic_type == "mean":
                        # Weibull with k=2 (Rayleigh-like) whose mean equals
                        # the requested mean value
                        windSpeeds.append(
                            random.weibullvariate(var[dbin] * 2 / np.sqrt(np.pi), 2)
                        )
                    windDirections.append(direction[dbin])
            var, direction = windSpeeds, windDirections

    # self.cla()
    kwargs.pop("zorder", None)

    # Init of the bins array if not set
    bins = kwargs.pop("bins", None)
    if bins is None:
        bins = np.linspace(np.min(var), np.max(var), 6)
    if isinstance(bins, int):
        bins = np.linspace(np.min(var), np.max(var), bins)
    bins = np.asarray(bins)
    nbins = len(bins)

    # Number of sectors
    nsector = kwargs.pop("nsector", None)
    if nsector is None:
        nsector = 16

    # Sets the colors table based on the colormap or the "colors" argument
    colors = kwargs.pop("colors", None)
    cmap = kwargs.pop("cmap", None)
    if colors is not None:
        if isinstance(colors, str):
            colors = [colors] * nbins
        if isinstance(colors, (tuple, list)):
            if len(colors) != nbins:
                raise ValueError("colors and bins must have same length")
    else:
        if cmap is None:
            cmap = mpl.cm.jet
        colors = self._colors(cmap, nbins)

    # Building the angles list (sector centres, clockwise from North)
    angles = np.arange(0, -2 * np.pi, -2 * np.pi / nsector) + np.pi / 2

    normed = kwargs.pop("normed", False)
    blowto = kwargs.pop("blowto", False)

    # Calm condition: samples at or below calm_limit are counted apart
    calm_limit = kwargs.pop("calm_limit", None)
    if calm_limit is not None:
        mask = var > calm_limit
        self.calm_count = len(var) - np.count_nonzero(mask)
        if normed:
            self.calm_count = self.calm_count * 100 / len(var)
        var = var[mask]
        direction = direction[mask]

    # Set the global information dictionary
    self._info["dir"], self._info["bins"], self._info["table"] = histogram(
        direction, var, bins, nsector, normed, blowto
    )

    return bins, nbins, nsector, colors, angles, kwargs
def _calm_circle(self):
    """
    Draw the calm centered circle (when any calm samples were counted)
    and return the initial radial offset used by the plotting methods.
    """
    count = self.calm_count
    if count and count > 0:
        self.add_artist(
            mpl.patches.Circle(
                (0., 0.),
                count,
                transform=self.transData._b,
                color=CALM_CIRCLE_COLOR,
                alpha=CALM_CIRCLE_ALPHA,
            )
        )
    return count or 0
def contour(self, direction, var, **kwargs):
    """
    Plot a windrose in linear mode. For each var bin, a closed line is
    drawn on the axes as segments between sector centers, stacked
    cumulatively on the previous bins. Each line can be formatted
    (color, width, ...) like with the standard pylab plot command.

    Parameters
    ----------
    direction : 1D array
        directions the wind blows from, North centred
    var : 1D array
        values of the variable to compute. Typically the wind speeds.

    Other Parameters
    ----------------
    nsector : integer, optional
        number of sectors used to compute the windrose table. If not set,
        nsector=16, then each sector will be 360/16=22.5°, and the
        resulting computed table will be aligned with the cardinal points.
    bins : 1D array or integer, optional
        number of bins, or a sequence of bin edges. If not set, bins=6,
        then bins=linspace(min(var), max(var), 6)
    blowto : bool, optional
        If True, the windrose will be pi rotated, to show where the wind
        blows to (useful for pollutant roses).
    colors : string or tuple, optional
        one string color ('k' or 'black'), in this case all bins will be
        plotted in this color; a tuple of matplotlib color args (string,
        float, rgb, etc), different levels will be plotted in different
        colors in the order specified.
    cmap : a cm Colormap instance from :obj:`matplotlib.cm`, optional
        if cmap == None and colors == None, a default Colormap is used.
    others kwargs
        Any supported argument of :obj:`matplotlib.pyplot.plot`
    """
    bins, nbins, nsector, colors, angles, kwargs = self._init_plot(
        direction, var, **kwargs
    )

    # closing lines: repeat the first sector at the end so each contour
    # forms a closed loop around the rose
    angles = np.hstack((angles, angles[-1] - 2 * np.pi / nsector))
    vals = np.hstack(
        (
            self._info["table"],
            np.reshape(
                self._info["table"][:, 0], (self._info["table"].shape[0], 1)
            ),
        )
    )

    # start stacking on top of the calm-circle radius (0 when no calms)
    offset = self._calm_circle()
    for i in range(nbins):
        val = vals[i, :] + offset
        offset += vals[i, :]
        # decreasing zorder so earlier (inner) contours stay on top
        zorder = ZBASE + nbins - i
        patch = self.plot(angles, val, color=colors[i], zorder=zorder, **kwargs)
        self.patches_list.extend(patch)
    self._update()
def contourf(self, direction, var, **kwargs):
"""
Plot a windrose in filled mode. For each var bins, a line will be
draw on the axes, a segment between each sector (center to center).
Each line can be formated (color, width, ...) like with standard plot
pylab command.
Parameters
----------
direction : 1D array
directions the wind blows from, North centred
var : 1D array
values of the variable to compute. Typically the wind speeds
Other Parameters
----------------
nsector: integer, optional
number of sectors used to compute the windrose table. If not set,
nsectors=16, then each sector will be 360/16=22.5°, and the
resulting computed table will be aligned with the cardinals points.
bins : 1D array or integer, optional
number of bins, or a sequence of bins variable. If not set, bins=6,
then bins=linspace(min(`var`), max(`var`), 6)
blowto : bool, optional
If True, the windrose will be pi rotated, to show where the wind
blow to (usefull for pollutant rose).
colors : string or tuple, optional
one string color ('k' or 'black'), in this case all bins will be
plotted in this color; a tuple of matplotlib color args (string,
float, rgb, etc), different levels will be plotted in different
colors in the order specified.
cmap : a cm Colormap instance from :obj:`matplotlib.cm`, optional
if cmap == None and colors == None, a default Colormap is used.
others kwargs
Any supported argument of :obj:`matplotlib.pyplot.plot`
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(
direction, var, **kwargs
)
kwargs.pop("facecolor", None)
kwargs.pop("edgecolor", None)
# closing lines
angles = np.hstack((angles, angles[-1] - 2 * np.pi / nsector))
vals = np.hstack(
(
self._info["table"],
np.reshape(
self._info["table"][:, 0], (self._info["table"].shape[0], 1)
),
)
)
offset = self._calm_circle()
for i in range(nbins):
val = vals[i, :] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
patch = self.fill( | np.append(angles, 0) | numpy.append |
"""Implement the GeometricProgram class"""
import sys
from time import time
from collections import defaultdict
import numpy as np
from ..nomials import NomialData
from ..small_classes import CootMatrix, SolverLog, Numbers
from ..keydict import KeyDict, KeySet
from ..small_scripts import mag
from ..solution_array import SolutionArray
from .costed import CostedConstraintSet
# Per-solver default keyword arguments, merged (and overridable) with the
# kwargs passed to GeometricProgram.solve().
DEFAULT_SOLVER_KWARGS = {"cvxopt": {"kktsolver": "ldl"}}
class GeometricProgram(CostedConstraintSet, NomialData):
# pylint: disable=too-many-instance-attributes
"""Standard mathematical representation of a GP.
Arguments
---------
cost : Constraint
Posynomial to minimize when solving
constraints : list of Posynomials
Constraints to maintain when solving (implicitly Posynomials <= 1)
GeometricProgram does not accept equality constraints (e.g. x == 1);
instead use two inequality constraints (e.g. x <= 1, 1/x <= 1)
verbosity : int (optional)
If verbosity is greater than zero, warns about missing bounds
on creation.
Attributes with side effects
----------------------------
`solver_out` and `solver_log` are set during a solve
`result` is set at the end of a solve if solution status is optimal
Examples
--------
>>> gp = gpkit.geometric_program.GeometricProgram(
# minimize
x,
[ # subject to
1/x # <= 1, implicitly
])
>>> gp.solve()
"""
def __init__(self, cost, constraints, substitutions,
             allow_missingbounds=False):
    """Build the GP: sanitize substitutions, substitute them into the cost
    and constraints, and generate the monomial/posynomial index maps and
    the exponent matrix A.

    Raises
    ------
    ValueError
        If a substitution value has a disallowed type, if any monomial
        coefficient is non-positive (a Signomial slipped in), or if the
        program is not fully bounded and `allow_missingbounds` is False.
    """
    # pylint:disable=super-init-not-called
    # initialize attributes modified by internal methods
    self.result = None
    self.v_ss = None
    self.nu_by_posy = None
    self.solver_log = None
    self.solver_out = None
    # GPs get varkeys from NomialData._reset, in .gen()
    self.__bare_init__(cost, constraints, substitutions, varkeys=False)
    # normalize substitutions in place: unwrap constant monomials and
    # strip units down to plain magnitudes
    for key, sub in self.substitutions.items():
        if hasattr(sub, "exp") and not sub.exp:
            sub = sub.value  # constant monomial -> its plain value
        if hasattr(sub, "units"):
            sub = sub.to(key.units or "dimensionless").magnitude
        self.substitutions[key] = sub
        # only allow Numbers and ndarrays
        if not isinstance(sub, (Numbers, np.ndarray)):
            raise ValueError("substitution {%s: %s} with value type %s is"
                             " not allowed in .substitutions; such"
                             " substitutions must be done by using"
                             " .subinplace()." % (key, sub, type(sub)))
    # posynomials[0] is the cost; the rest are constraints as posy <= 1
    self.posynomials = [cost.sub(self.substitutions)]
    self.posynomials.extend(self.as_posyslt1(self.substitutions))
    self.hmaps = [p.hmap for p in self.posynomials]
    ## Generate various maps into the posy- and monomials
    # k [j]: number of monomials (columns of F) present in each constraint
    self.k = [len(hm) for hm in self.hmaps]
    p_idxs = []  # p_idxs [i]: posynomial index of each monomial
    self.m_idxs = []  # m_idxs [i]: monomial indices of each posynomial
    for i, p_len in enumerate(self.k):
        self.m_idxs.append(list(range(len(p_idxs), len(p_idxs) + p_len)))
        p_idxs += [i]*p_len
    self.p_idxs = np.array(p_idxs)
    # meq_idxs: first exp-index of each monomial equality
    self.meq_idxs = [sum(self.k[:i]) for i, p in enumerate(self.posynomials)
                     if getattr(p, "from_meq", False)]
    self.gen()  # A [i, v]: sparse matrix of powers in each monomial
    if any(c <= 0 for c in self._cs):
        raise ValueError("GeometricPrograms cannot contain Signomials.")
    if self.missingbounds and not allow_missingbounds:
        boundstrs = "\n".join(" %s has no %s bound%s" % (v, b, x)
                              for (v, b), x in self.missingbounds.items())
        raise ValueError("Geometric Program is not fully bounded:\n"
                         + boundstrs)
def gen(self):
    """Generate nomial and solve data (A, missingbounds) from the posynomials."""
    self._reset()  # method from NomialData
    flat_exps, flat_cs = [], []
    for hmap in self.hmaps:
        flat_exps.extend(hmap.keys())
        flat_cs.extend(hmap.values())
    self._exps, self._cs = flat_exps, flat_cs
    self.A, self.missingbounds = genA(self.exps, self.varlocs,
                                      self.meq_idxs)
@property
def varkeys(self):
    """The GP's varkeys, built lazily on first access and then cached."""
    cached = self._varkeys
    if cached is None:
        cached = KeySet(self.varlocs)
        self._varkeys = cached
    return cached
# pylint: disable=too-many-statements, too-many-locals
def solve(self, solver=None, verbosity=1, warn_on_check=False,
          process_result=True, **kwargs):
    """Solves a GeometricProgram and returns the solution.

    Arguments
    ---------
    solver : str or function (optional)
        By default uses one of the solvers found during installation.
        If set to "mosek", "mosek_cli", or "cvxopt", uses that solver.
        If set to a function, passes that function cs, A, p_idxs, and k.
    verbosity : int (optional)
        If greater than 0, prints solver name and solve time.
    warn_on_check : bool (optional)
        If True, a failed `check_solution` prints a warning instead of
        re-raising the RuntimeWarning.
    process_result : bool (optional)
        If True, runs the constraint set's result post-processing.
    **kwargs :
        Passed to solver constructor and solver function.

    Returns
    -------
    result : dict
        A dictionary containing the translated solver result; keys below.

        cost : float
            The value of the objective at the solution.
        variables : dict
            The value of each variable at the solution.
        sensitivities : dict
            monomials : array of floats
                Each monomial's dual variable value at the solution.
            posynomials : array of floats
                Each posynomials's dual variable value at the solution.

    Raises
    ------
    RuntimeWarning
        If the solver reports a non-optimal status, or the solution
        fails `check_solution` (unless `warn_on_check`).
    """
    def _get_solver(solver):
        """Get the solverfn and solvername associated with solver"""
        if solver is None:
            from .. import settings
            solver = settings.get("default_solver", None)
            if not solver:
                raise ValueError(
                    "No solver was given; perhaps gpkit was not properly"
                    " installed, or found no solvers during the"
                    " installation process.")
        if solver == "cvxopt":
            from .._cvxopt import cvxoptimize
            solverfn = cvxoptimize
        elif solver == "mosek_cli":
            from .._mosek import cli_expopt
            solverfn = cli_expopt.imize_fn(**kwargs)
        elif solver == "mosek":
            from .._mosek import expopt
            solverfn = expopt.imize
        elif hasattr(solver, "__call__"):
            solverfn = solver
            solver = solver.__name__
        else:
            raise ValueError("Unknown solver '%s'." % solver)
        return solverfn, solver

    solverfn, solvername = _get_solver(solver)
    starttime = time()
    if verbosity > 0:
        print("Using solver '%s'" % solvername)
        print("Solving for %i variables." % len(self.varlocs))
    # start from the per-solver defaults, letting user kwargs override
    solver_kwargs = DEFAULT_SOLVER_KWARGS.get(solvername, {})
    solver_kwargs.update(kwargs)

    # NOTE: SIDE EFFECTS AS WE LOG SOLVER'S STDOUT AND OUTPUT
    original_stdout = sys.stdout
    self.solver_log = SolverLog(verbosity-1, original_stdout)
    try:
        sys.stdout = self.solver_log   # CAPTURED
        solver_out = solverfn(c=self.cs, A=self.A, p_idxs=self.p_idxs,
                              k=self.k, **solver_kwargs)
        self.solver_out = solver_out
    finally:
        # always restore stdout, even if the solver raised
        sys.stdout = original_stdout
    # STDOUT HAS BEEN RETURNED. ENDING SIDE EFFECTS.
    self.solver_log = "\n".join(self.solver_log)

    soltime = time() - starttime
    if verbosity > 0:
        print("Solving took %.3g seconds." % (soltime,))
    tic = time()

    # allow mosek's NEAR_DUAL_FEAS solution status, because our check
    # will catch anything that's not actually near enough.
    # TODO: implement this in the mosek / mosek_cli interfaces, not here.
    solver_status = str(solver_out.get("status", None))
    if solver_status.lower() not in ["optimal", "near_dual_feas"]:
        raise RuntimeWarning(
            "final status of solver '%s' was '%s', not 'optimal'.\n\n"
            "The solver's result is stored in model.program.solver_out. "
            "A result dict can be generated via "
            "program._compile_result(program.solver_out)." %
            (solvername, solver_status))
    if solver_status.lower() == "near_dual_feas":
        # accepted, but warn: check_solution below will verify closeness
        print(RuntimeWarning(
            "final status of solver '%s' was '%s', not 'optimal'.\n\n"
            % (solvername, solver_status)))

    self.result = self._compile_result(solver_out)  # NOTE: SIDE EFFECTS
    if verbosity > 1:
        print("result packing took %.2g%% of solve time" %
              ((time() - tic) / soltime * 100))
    tic = time()
    try:
        self.check_solution(self.result["cost"], solver_out['primal'],
                            nu=solver_out["nu"], la=solver_out["la"])
    except RuntimeWarning as e:
        if warn_on_check:
            print("Solution check warning: %s" % e)
        else:
            raise e
    if verbosity > 1:
        print("solution checking took %.2g%% of solve time" %
              ((time() - tic) / soltime * 100))
    if process_result:
        self.process_result(self.result)
    self.result["soltime"] = soltime
    return self.result
def _generate_nula(self, solver_out):
    """Ensure solver_out carries both dual vectors: "nu" (one entry per
    monomial) and "la" (one per posynomial), deriving whichever one the
    solver omitted. Also caches self.nu_by_posy, nu split by posynomial."""
    solver_out["primal"] = np.ravel(solver_out['primal'])
    if "nu" in solver_out:
        # solver gave us monomial sensitivities, generate posynomial ones
        nu = np.ravel(solver_out["nu"])
        self.nu_by_posy = [nu[mi] for mi in self.m_idxs]
        la = np.array([sum(nup) for nup in self.nu_by_posy])
    elif "la" in solver_out:
        # solver gave us posynomial sensitivities, generate monomial ones
        la = np.ravel(solver_out["la"])
        if len(la) == len(self.hmaps) - 1:
            # assume the solver dropped the cost's sensitivity (always 1.0)
            la = np.hstack(([1.0], la))
        # z[i] = log of monomial i's value at the primal solution
        Ax = np.ravel(self.A.dot(solver_out['primal']))
        z = Ax + np.log(self.cs)
        m_iss = [self.p_idxs == i for i in range(len(la))]
        # split each posynomial's dual among its monomials proportionally
        # to their relative magnitudes (a softmax of z within the posy)
        self.nu_by_posy = [la[p_i]*np.exp(z[m_is])/sum(np.exp(z[m_is]))
                           for p_i, m_is in enumerate(m_iss)]
        nu = np.hstack(self.nu_by_posy)
    else:
        raise RuntimeWarning("The dual solution was not returned.")
    solver_out["nu"], solver_out["la"] = nu, la
def _compile_result(self, solver_out):
    """Creates a result dict (as returned by solve()) from solver output

    This internal method is called from within the solve() method, unless
    solver_out["status"] is not "optimal", in which case a RuntimeWarning
    is raised prior to this method being called. In that case, users
    may use this method to attempt to create a results dict from the
    output of the failed solve.

    Arguments
    ---------
    solver_out: dict
        dict in format returned by solverfn within GeometricProgram.solve

    Returns
    -------
    result: dict
        dict in format returned by GeometricProgram.solve()
    """
    self._generate_nula(solver_out)
    primal = solver_out["primal"]
    nu, la = solver_out["nu"], solver_out["la"]
    # confirm lengths before calling zip
    if not self.varlocs and len(primal) == 1 and primal[0] == 0:
        primal = []  # an empty result, as returned by MOSEK
    assert len(self.varlocs) == len(primal)
    # the primal is in log-space; exponentiate to get variable values
    result = {"freevariables": KeyDict(zip(self.varlocs, np.exp(primal)))}
    # get cost #
    if "objective" in solver_out:
        result["cost"] = float(solver_out["objective"])
    else:
        # use self.posynomials[0] because the cost may have had constants
        freev = result["freevariables"]
        cost = self.posynomials[0].sub(freev)
        if cost.varkeys:
            raise ValueError("cost contains unsolved variables %s"
                             % cost.varkeys.keys())
        result["cost"] = mag(cost.c)
    # get sensitivities #
    result["constants"] = KeyDict(self.substitutions)
    result["variables"] = KeyDict(result["freevariables"])
    result["variables"].update(result["constants"])
    result["sensitivities"] = {"nu": nu, "la": la}
    self.v_ss = self.sens_from_dual(la[1:].tolist(), self.nu_by_posy[1:],
                                    result)
    # add cost's sensitivity in (nu could be self.nu_by_posy[0])
    cost_senss = {var: sum([self.cost.exps[i][var]*nu[i] for i in locs])
                  for (var, locs) in self.cost.varlocs.items()}
    # not using HashVector addition because we want to preserve zeros
    var_senss = self.v_ss.copy()
    for key, value in cost_senss.items():
        var_senss[key] = value + var_senss.get(key, 0)
    # carry linked sensitivities over to their constants
    # BUGFIX: iterate over a list copy -- the loop pops from var_senss,
    # and mutating a dict while iterating its keys() view raises
    # RuntimeError in Python 3.
    for v in list(var_senss.keys()):
        if v.gradients:
            dlogcost_dlogv = var_senss.pop(v)
            val = result["constants"][v]
            for c, dv_dc in v.gradients.items():
                # chain rule in log-space through the linked variable
                dlogv_dlogc = dv_dc * result["constants"][c]/val
                accum = var_senss.get(c, 0)
                var_senss[c] = dlogcost_dlogv*dlogv_dlogc + accum
                if v in cost_senss:
                    if c in self.cost.varkeys:
                        dlogcost_dlogv = cost_senss.pop(v)
                        accum = cost_senss.get(c, 0)
                        cost_senss[c] = dlogcost_dlogv*dlogv_dlogc + accum
    result["sensitivities"]["cost"] = cost_senss
    result["sensitivities"]["variables"] = KeyDict(var_senss)
    const_senss = {k: v for k, v in var_senss.items()
                   if k in result["constants"]}
    result["sensitivities"]["constants"] = KeyDict(const_senss)
    return SolutionArray(result)
# TODO: set tol by solver? or otherwise return it to 1e-5 for mosek
def check_solution(self, cost, primal, nu, la, tol=1e-3, abstol=1e-20):
"""Run a series of checks to mathematically confirm sol solves this GP
Arguments
---------
cost: float
cost returned by solver
primal: list
primal solution returned by solver
nu: numpy.ndarray
monomial lagrange multiplier
la: numpy.ndarray
posynomial lagrange multiplier
Raises
------
RuntimeWarning, if any problems are found
"""
def _almost_equal(num1, num2):
"local almost equal test"
return (num1 == num2 or abs((num1 - num2) / (num1 + num2)) < tol
or abs(num1 - num2) < abstol)
A = self.A.tocsr()
# check primal sol
primal_exp_vals = self.cs * np.exp(A.dot(primal)) # c*e^Ax
if not _almost_equal(primal_exp_vals[self.m_idxs[0]].sum(), cost):
raise RuntimeWarning("Primal solution computed cost did not match"
" solver-returned cost: %s vs %s" %
(primal_exp_vals[self.m_idxs[0]].sum(), cost))
for mi in self.m_idxs[1:]:
if primal_exp_vals[mi].sum() > 1 + tol:
raise RuntimeWarning("Primal solution violates constraint:"
" %s is greater than 1." %
primal_exp_vals[mi].sum())
# check dual sol
# note: follows dual formulation in section 3.1 of
# http://web.mit.edu/~whoburg/www/papers/hoburg_phd_thesis.pdf
if not _almost_equal(self.nu_by_posy[0].sum(), 1.):
raise RuntimeWarning("Dual variables associated with objective sum"
" to %s, not 1" % self.nu_by_posy[0].sum())
if any(nu < 0):
if all(nu > -tol/1000.): # HACK, see issue 528
print("Allowing negative dual variable(s) as small as"
" %s." % min(nu))
else:
raise RuntimeWarning("Dual solution has negative entries as"
" small as %s." % min(nu))
ATnu = A.T.dot(nu)
if any( | np.abs(ATnu) | numpy.abs |
import numpy as np
from utils import file_helper, pulse_helper
# 1 -> simulate TileCal (7-sample window); 0 -> generic 10-sample window.
TILECAL = 1
NUMBER_OF_CELLS = 1
# Represents all possible probabilities of the cell receiving signals.
# Example: 0.5 equals 50% chance of receiving a signal in a collision.
# We can use an array to generate signals for several probabilities:
# signal_probabilities = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
signal_probabilities = [1.0]
def _number_of_samples_based_on(TILECAL):
    """Return the per-event window length: 7 samples for TileCal, else 10."""
    if TILECAL:
        return 7
    return 10
def _base_data(number_of_data):
    """Return `number_of_data` Gaussian pedestal samples (mean 30, std 1.5)
    used as the baseline that signals and pile-up are added onto."""
    pedestal_mean, pedestal_std = 30, 1.5
    return np.random.normal(pedestal_mean, pedestal_std, number_of_data)
def _pileup_indexes(number_of_data, probability=None):
    """Pick the sample indexes that will receive a pile-up pulse.

    Parameters
    ----------
    number_of_data : int
        Total number of samples in the dataset.
    probability : float, optional
        Fraction of samples that receive pile-up. Defaults to the
        module-level ``signal_probability`` set by the main loop
        (preserving the original behaviour), but can now be passed
        explicitly, which removes the hidden-global dependency.

    Returns
    -------
    numpy.ndarray
        ``int(probability * number_of_data)`` distinct indexes drawn
        uniformly at random. Note int() truncates, so a fractional
        count is rounded down.
    """
    if probability is None:
        probability = signal_probability  # legacy module-level global
    pu_indexes = np.random.permutation(number_of_data)
    return pu_indexes[0:int(probability * number_of_data)]
def _pileup():
    """Draw one pile-up amplitude from an exponential with mean 100."""
    return np.random.exponential(100)
# Pile-up is added at position "i" and at its neighbours: for example, for
# n=100, i=97 and a 7-sample window, pile-up lands at samples 94..100.
def _apply_pileup_indexes_when_tilecal(i, pu_indexes, x):
    """Add one randomly-scaled, jittered pile-up pulse around sample
    pu_indexes[i] of x (in place) and return x.

    NOTE(review): relies on the module-level global `number_of_data` for
    the right-edge test, and indexes the pulse with pu[j + 4] -- assumes
    `pulse_helper.get_jitter_pulse()` spans at least indexes 0..6; confirm.
    """
    # scale the unit pulse shape by a random exponential amplitude
    pu = np.multiply(_pileup(), pulse_helper.get_jitter_pulse())
    if pu_indexes[i] < 4:
        # near the left edge: clip the window
        # NOTE(review): range(pu_indexes[i]-2, 3) yields j values for which
        # pu_indexes[i] + j can be negative, so x[...] wraps to the array's
        # end -- looks like a boundary bug; confirm intended behaviour.
        for j in range(pu_indexes[i]-2, 3):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 4]
    elif pu_indexes[i] > (number_of_data - 3):
        # near the right edge: clip the window on the right
        for j in range(-4, number_of_data - pu_indexes[i]):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 4]
    else:
        # interior sample: full window, j = -4..2
        for j in range(-4, 3):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 4]
    return x
def _apply_pileup_indexes(i, pu_indexes, x):
    """Non-TileCal (10-sample) variant of the pile-up injection; mutates x
    in place and returns it.

    NOTE(review): the caller flags this branch as "NOT TESTED". The
    hard-coded 999993 only matches number_of_data == 10**6, and the
    function also reads the module-level global `number_of_data` --
    confirm both before relying on this path.
    """
    pu = np.multiply(_pileup(), pulse_helper.get_pulse_paper_COF())
    if pu_indexes[i] < 3:
        # near the left edge: clip the window
        for j in range(pu_indexes[i]-2, 7):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 3]
    elif pu_indexes[i] > 999993:
        # near the right edge (hard-coded for 1e6 samples)
        for j in range(-3, number_of_data - pu_indexes[i]):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 3]
    else:
        # interior sample: full window, j = -3..6
        for j in range(-3, 7):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 3]
    return x
if __name__ == '__main__':
# Creating the dataset.
number_of_events = 10000000
number_of_samples = _number_of_samples_based_on(TILECAL)
number_of_data = number_of_samples * number_of_events
# Control when generate noise or signal
is_noise = 1
for level in range(0, len(signal_probabilities)):
print('Processing: for {0:2.6f}\n'
.format(signal_probabilities[level]))
signal_probability = signal_probabilities[level] # Signal_probability
signal_mean = 300 # Exponential signal mean
x = _base_data(number_of_data)
pu_indexes = _pileup_indexes(number_of_data)
if signal_probability > 0:
for i in range(0, int(signal_probability * number_of_data)):
if TILECAL:
x = _apply_pileup_indexes_when_tilecal(i, pu_indexes, x)
else:
# NOT TESTED!!!
print('NOT TESTED!!!')
x = _apply_pileup_indexes(i, pu_indexes, x)
# Formatting data
data = np.reshape(x, (number_of_samples, number_of_events))
data = | np.transpose(data) | numpy.transpose |
## regressionVec.py
## vector-valued regression (with diagonal covariance noise matrix)
from __future__ import division, print_function, absolute_import
import numpy as np
import tensorflow as tf
import itertools as it
import scipy.special as ss
def eNet(alpha, lam, v):
    """Elastic-net penalty: lam * (alpha * L1(v) + (1 - alpha) * L2(v)^2)."""
    l1_term = alpha * tf.reduce_sum(tf.abs(v))
    l2_term = (1-alpha) * tf.reduce_sum(tf.square(v))
    return lam * (l1_term + l2_term)
def calcMargiProb(cadId, M):
    """Return the marginal probability vector p(M=j), j = 0..M-1."""
    counts = [np.sum(cadId == m) for m in range(M)]
    return np.array(counts) / cadId.shape[0]
def calcJointProb(G, cadId, M):
    """Return the joint probability matrix p(M=j, x in C_i).

    Parameters
    ----------
    G : (N, M) ndarray
        Soft cadre-membership weights per observation.
    cadId : (N,) integer ndarray
        Hard cadre assignment of each observation.
    M : int
        Number of cadres.

    Returns
    -------
    (M, M) ndarray with entry [i, j] = sum(G[cadId == i, j]) / N.
    """
    jointProbMat = np.zeros((M, M))  # p(M=j, x in C_i)
    for i in range(M):
        # sum the column weights of every point hard-assigned to cadre i;
        # vectorized over j, replacing the original O(M^2) double loop
        jointProbMat[i, :] = G[cadId == i].sum(axis=0)
    return jointProbMat / G.shape[0]
def calcCondiProb(jointProb, margProb):
    """Return p(M = j | x in C_i); rows with zero marginal stay all-zero."""
    condProb = np.zeros_like(jointProb)
    nonzero = margProb != 0
    condProb[nonzero] = jointProb[nonzero] / margProb[nonzero, None]
    return condProb
def estEntropy(condProb):
    """Return the estimated entropy (in bits) of each cadre's row."""
    plogp = ss.xlogy(condProb, condProb)  # xlogy(0, 0) == 0 by convention
    return -plogp.sum(axis=1) / np.log(2)
class regressionCadreModel(object):
def __init__(self, M=2, gamma=10., lambda_d=0.01, lambda_W=0.01,
             alpha_d=0.9, alpha_W=0.9, Tmax=10000, record=100,
             eta=1e-3, Nba=50, eps=1e-3):
    """Configure a vector-valued regression cadre model.

    Hyperparameters are stored as-is; learned parameters start at 0 and
    data/outputs start empty until fit() is called.
    """
    ## hyperparameters / structure
    self.M = M                # number of cadres
    self.gamma = gamma        # cadre assignment sharpness
    self.lambda_d = lambda_d  # regularization strengths (d and W)
    self.lambda_W = lambda_W
    self.alpha_d = alpha_d    # elastic net mixing weights (d and W)
    self.alpha_W = alpha_W
    self.cadFts = None        # cadre-assignment feature indices
    self.tarFts = None        # target-prediction feature indices
    self.fitted = False       # set True by fit()
    ## optimization settings
    self.Tmax = Tmax          # maximum iterations
    self.record = record      # loss-recording interval (in iterations)
    self.eta = eta            # initial stepsize
    self.Nba = Nba            # minibatch size
    self.eps = eps            # convergence tolerance
    ## parameters (populated by fit)
    self.W = 0                # regression weights
    self.W0 = 0               # regression biases
    self.C = 0                # cadre centers
    self.d = 0                # cadre assignment weights
    self.sigma = 0            # prediction noise
    ## data
    self.X = None             # copy of input data
    self.Y = None             # copy of target values
    ## outputs
    self.loss = []            # loss trajectory
def get_params(self, deep=True):
    """Return the model's hyperparameters as a dict (sklearn-style API)."""
    names = ('M', 'gamma', 'lambda_d', 'lambda_W', 'alpha_d', 'alpha_W',
             'Tmax', 'record', 'eta', 'Nba', 'eps')
    return {name: getattr(self, name) for name in names}
def set_params(self, **parameters):
    """Set hyperparameters from keyword arguments and return self
    (sklearn-style API)."""
    for name, value in parameters.items():
        setattr(self, name, value)
    return self
def fit(self, Xtr, Ytr, cadFts=None, tarFts=None, inits=dict(), seed=16162):
    """Fit the regression cadre model to training data.

    Arguments
    ---------
    Xtr : (N, P) array of training features
    Ytr : (N, Py) array of training targets
    cadFts : indices of features used for cadre assignment
        (defaults to all features)
    tarFts : indices of features used for target prediction
        (defaults to all features)
    inits : dict of optional initial values for 'C', 'd', 'W', 'W0', 'sigma'
    seed : int
        NOTE(review): currently unused -- the random initializations and
        minibatch draws are not seeded.

    Returns
    -------
    self, with learned parameters in C, d, W, W0, sigma and the loss
    trajectory in self.loss.
    """
    if cadFts is not None:
        self.cadFts = cadFts
    else:
        self.cadFts = np.arange(Xtr.shape[1])
    if tarFts is not None:
        self.tarFts = tarFts
    else:
        self.tarFts = np.arange(Xtr.shape[1])
    Pcad, Ptar, Py, Ntr = len(self.cadFts), len(self.tarFts), Ytr.shape[1], Xtr.shape[0]
    # number of ((cadre-assignment, target-prediction) features, training observations)
    self.fitted = True
    self.X = Xtr
    self.Y = Ytr

    ############################################
    ## tensorflow parameters and placeholders ##
    ############################################
    tf.reset_default_graph()

    ## cadre centers parameter
    if 'C' in inits:
        C = tf.Variable(inits['C'], dtype=tf.float64, name='C')
    else:
        C = tf.Variable(np.random.normal(loc=0., scale=0.1, size=(Pcad,self.M)),
                        dtype=tf.float64, name='C')
    ## cadre determination weights parameter
    if 'd' in inits:
        d = tf.Variable(inits['d'], dtype=tf.float64, name='d')
    else:
        d = tf.Variable(np.random.uniform(size=(Pcad)), dtype=tf.float64, name='d')
    ## regression hyperplane weights parameter
    if 'W' in inits:
        W = tf.Variable(inits['W'], dtype=tf.float64, name='W')
    else:
        W = tf.Variable(np.random.normal(loc=0., scale=0.1, size=(Py, Ptar,self.M)),
                        dtype=tf.float64, name='W')
    ## regression hyperplane bias parameter
    if 'W0' in inits:
        # BUGFIX: previously read inits['w0'] (lowercase) after checking
        # for 'W0', raising KeyError whenever an initial W0 was supplied.
        W0 = tf.Variable(inits['W0'], dtype=tf.float64, name='w0')
    else:
        W0 = tf.Variable(tf.zeros(shape=(Py,self.M), dtype=tf.float64),
                         dtype=tf.float64, name='w0')
    ## model error parameter
    if 'sigma' in inits:
        sigma = tf.Variable(inits['sigma'], dtype=tf.float64, name='sigma')
    else:
        sigma = tf.Variable(0.1 * np.ones((Py,1)), dtype=tf.float64, name='sigma')

    Xcad = tf.placeholder(dtype=tf.float64, shape=(None,Pcad), name='Xcad')
    Xtar = tf.placeholder(dtype=tf.float64, shape=(None,Ptar), name='Xtar')
    N = tf.cast(tf.gather(tf.shape(Xcad), 0), dtype=tf.float64, name='N')
    Y = tf.placeholder(dtype=tf.float64, shape=(None,Py), name='Y')

    ## T[n,m] = ||x^n - c^m||^2_D  (feature-weighted squared distances)
    T = tf.einsum('npm,p->nm',
                  tf.square(tf.map_fn(lambda x: tf.expand_dims(x,1) - C, Xcad)),
                  tf.abs(d))

    ## G[n,m] = g_m(x^n)
    ##        = 1 / sum_m' exp(gamma(T[n,m] - T[n,m']))  (soft cadre weights)
    G = 1 / tf.map_fn(lambda t:
                      tf.reduce_sum(tf.exp(self.gamma*(tf.expand_dims(t,1) -
                                    tf.expand_dims(t,0))), axis=1), T, name='G')

    ## E[n,y,m] = e^m_y(x^n)  (per-cadre linear predictions)
    E = tf.add(tf.einsum('np,ypm->nym', Xtar, W), W0, name='E')

    ## F[n,y] = f_y(x^n)  (cadre-weighted prediction)
    F = tf.einsum('nm,nym->ny', G, E, name='F')

    ## negative log-likelihood with diagonal noise, plus elastic-net terms
    L = (1/2 + 1/N)*tf.reduce_sum(tf.log(sigma**2)) + 1/2*tf.reduce_sum(tf.reduce_mean((F - Y)**2, axis=1) / sigma ** 2) + (eNet(self.alpha_d, self.lambda_d, d) + eNet(self.alpha_W, self.lambda_W, W) / Py) / 2 / N / tf.reduce_prod(sigma)**2

    optimizer = tf.train.AdamOptimizer(learning_rate=self.eta).minimize(L)

    ####################
    ## learning model ##
    ####################
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        ## perform optimization
        for t in range(self.Tmax):
            inds = np.random.choice(Ntr, self.Nba, replace=False)
            sess.run(optimizer, feed_dict={Xcad: Xtr[np.ix_(inds, self.cadFts)],
                                           Xtar: Xtr[np.ix_(inds, self.tarFts)],
                                           Y: Ytr[inds]})
            # record-keeping: log full-data loss and test for convergence
            if not t % self.record:
                self.loss.append(L.eval(feed_dict={
                    Xcad: Xtr[:,self.cadFts],
                    Xtar: Xtr[:,self.tarFts],
                    Y: Ytr}))
                if len(self.loss) > 2 and (np.abs(self.loss[-1] - self.loss[-2]) < self.eps):
                    break
        self.C, self.d, self.W, self.W0, self.sigma = C.eval(), d.eval(), W.eval(), W0.eval(), sigma.eval()
    return self
def predictFull(self, Xnew):
    """Returns predicted values, cadre weights, and cadre estimates for new data

    Arguments
    ---------
    Xnew : (N, P) array of observations over the full feature set (the
        stored cadFts / tarFts index into its columns)

    Returns
    -------
    (Fnew, Gnew, mNew) :
        Fnew : (N, Py) predicted values
        Gnew : (N, M) soft cadre-membership weights
        mNew : (N,) index of each observation's best (argmax) cadre
    """
    if not self.fitted: print('warning: model not yet fit')

    # rebuild the inference graph from the learned parameter values
    tf.reset_default_graph()
    C = tf.Variable(self.C, dtype=tf.float64, name='C')
    d = tf.Variable(self.d, dtype=tf.float64, name='d')
    W = tf.Variable(self.W, dtype=tf.float64, name='W')
    W0 = tf.Variable(self.W0, dtype=tf.float64, name='w0')

    Xcad = tf.placeholder(dtype=tf.float64, shape=(None,len(self.cadFts)), name='X')
    Xtar = tf.placeholder(dtype=tf.float64, shape=(None,len(self.tarFts)), name='X')

    ## T[n,m] = ||x^n - c^m||^2_D
    T = tf.einsum('npm,p->nm',
                  tf.square(tf.map_fn(lambda x: tf.expand_dims(x,1) - C, Xcad)),
                  tf.abs(d))

    ## G[n,m] = g_m(x^n)
    ##        = 1 / sum_m' exp(gamma(T[n,m] - T[n,m']))
    G = 1 / tf.map_fn(lambda t:
                      tf.reduce_sum(tf.exp(self.gamma*(tf.expand_dims(t,1) -
                                    tf.expand_dims(t,0))), axis=1), T, name='G')

    ## E[n,y,m] = e^m_y(x^n)
    E = tf.add(tf.einsum('np,ypm->nym', Xtar, W), W0, name='E')

    ## F[n,y] = f_y(x^n)
    F = tf.einsum('nm,nym->ny', G, E, name='F')

    # hard assignment: the most heavily weighted cadre per observation
    bstCd = tf.argmax(G, axis=1, name='bestCadre')

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        Fnew, Gnew, mNew = sess.run([F, G, bstCd], feed_dict={Xcad: Xnew[:,self.cadFts],
                                                              Xtar: Xnew[:,self.tarFts]})
    return Fnew, Gnew, mNew
def predict(self, Xnew):
    """Return only the predicted values for new data (drops the cadre info)."""
    predictions, _, _ = self.predictFull(Xnew)
    return predictions
def score(self, Xnew, Ynew):
    """Return the mean squared error of the predictions on new data."""
    residual = self.predict(Xnew) - np.squeeze(Ynew)
    return (residual ** 2).mean()
def entropy(self, Xnew):
    """Return the estimated cadre-membership entropy for each cadre."""
    _, G, m = self.predictFull(Xnew)
    marginal = calcMargiProb(m, self.M)
    joint = calcJointProb(G, m, self.M)
    conditional = calcCondiProb(joint, marginal)
    return estEntropy(conditional)
def getNumberParams(self):
"""Returns number of parameters of a model"""
return | np.prod(self.C.shape) | numpy.prod |
import numpy as np
# Exercise 1.1: invert A numerically and verify A @ A^-1 ~ identity.
print("")
print("Ejercicio 1.1")
A = np.array([[4, -2, 1, ], [3, 6, -4], [2, 1, 8]])
A_inversa = np.linalg.inv(A)
print("")
print("Matriz A")
print(A)
print("")
print("Matriz A^-1")
print(A_inversa)
print("")
print("Verificando A * A^-1")
print(A@A_inversa)
print("")
print("Ejercicio 1.2")
# Analytic inverse of A for comparison (presumably adjugate/det with
# det(A) = 263 -- TODO confirm against the exercise statement).
A_inversa_analitica = (1/263)*np.array([[52, 17, 2],[-32, 30, 19],[-9, -8, 30]])
print("")
print("Cifras significativas en A^{-1}")
# log10 of the absolute per-entry error; its negation approximates the
# number of correct significant digits (emits a divide-by-zero warning
# for entries that match exactly -- presumably acceptable here).
print(np.log10(np.abs(A_inversa-A_inversa_analitica)))
print("")
print("Ejercicio 2")
# Right-hand-side vector for the linear system solved in Exercise 2.
b = np.array([12, -25, 32])
"""
Module containing the three basic classes: Parameters, Particles, Species.
"""
from copy import deepcopy
from numpy import array, cross, ndarray, pi, sqrt, tanh, zeros
from scipy.constants import physical_constants
from scipy.linalg import norm
from .plasma import Species
from .utilities.exceptions import ParticlesError
class Parameters:
"""
Class containing all the constants and physical constants of the simulation.
Parameters
----------
dic : dict, optional
Dictionary to be copied.
Attributes
----------
a_ws : float
Wigner-Seitz radius. Calculated from the ``total_num_density`` .
equilibration_steps : int
Total number of equilibration timesteps.
eq_dump_step : int
Equilibration dump interval.
magnetization_steps : int
Total number of magnetization timesteps.
mag_dump_step : int
Magnetization dump interval.
production_steps : int
Total number of production timesteps.
prod_dump_step : int
Production dump interval.
box_volume : float
Volume of simulation box.
pbox_volume : float
Volume of initial particle box.
dimensions : int
Number of non-zero dimensions. Default = 3.
fourpie0: float
Electrostatic constant :math:`4\\pi \\epsilon_0`.
num_species : int
Number of species.
kB : float
Boltzmann constant obtained from ``scipy.constants``.
hbar : float
Reduced Planck's constant.
hbar2 : float
Square of reduced Planck's constant.
a0 : float
Bohr Radius.
c0 : float
Speed of light.
qe : float
Elementary charge.
me : float
Electron mass.
eps0 : float
Vacuum electrical permittivity.
eV2K : float
Conversion factor from eV to Kelvin obtained from ``scipy.constants``.
J2erg : float
Conversion factor from Joules to erg. Needed for cgs units.
QFactor : float
Charge Factor defined as :math:`\mathcal Q = \sum_{i}^{N} q_{i}^2` .
Lx : float
Box length in the :math:`x` direction.
Ly : float
Box length in the :math:`y` direction.
Lz : float
Box length in the :math:`z` direction.
e1 : float
Unit vector in the :math:`x` direction.
e2 : float
Unit vector in the :math:`y` direction.
e3 : float
Unit vector in the :math:`z` direction.
LPx : float
Initial particle box length in the :math:`x` direction.
LPy : float
Initial particle box length in the :math:`y` direction.
LPz : float
Initial particle box length in the :math:`z` direction.
ep1 : float
Unit vector of the initial particle box in the :math:`x` direction.
ep2 : float
Unit vector of the initial particle box in the :math:`y` direction.
ep3 : float
Unit vector of the initial particle box in the :math:`z` direction.
input_file : str
YAML Input file with all the simulation's parameters.
T_desired : float
Target temperature for the equilibration phase.
species_num : numpy.ndarray
Number of particles of each species. Shape = (``num_species``)
species_concentrations : numpy.ndarray
Concentration of each species. Shape = (``num_species``)
species_temperatures : numpy.ndarray
Initial temperature of each species. Shape = (``num_species``)
species_masses : numpy.ndarray
Mass of each species. Shape = (``num_species``)
species_charges : numpy.ndarray
Charge of each species. Shape = (``num_species``)
species_names : list
Name of each species. Len = (``num_species``)
species_plasma_frequencies : numpy.ndarray
Plasma Frequency of each species. Shape = (``num_species``)
species_num_dens : numpy.ndarray
Number density of each species. Shape = (``num_species``)
total_ion_temperature : float
Total initial ion temperature calculated as `` = species_concentration @ species_temperatures``.
total_net_charge : float
Total charge in the system.
total_num_density : float
Total number density. Calculated from the sum of :attr:`Species.number_density`.
total_num_ptcls : int
Total number of particles. Calculated from the sum of :attr:`Species.num`.
measure : bool
Flag for production phase.
verbose : bool
Flag for screen output.
simulations_dir : str
Name of directory where to store simulations.
job_dir : str
Directory name of the current job/run
production_dir : str
Directory name where to store simulation's files of the production phase. Default = 'Production'.
equilibration_dir : str
Directory name where to store simulation's file of the equilibration phase. Default = 'Equilibration'.
preprocessing_dir : str
Directory name where to store preprocessing files. Default = "PreProcessing".
postprocessing_dir : str
Directory name where to store postprocessing files. Default = "PostProcessing".
prod_dump_dir : str
Directory name where to store production phase's simulation's checkpoints. Default = 'dumps'.
eq_dump_dir : str
Directory name where to store equilibration phase's simulation's checkpoints. Default = 'dumps'.
job_id : str
Appendix of all simulation's files.
log_file : str
Filename of the simulation's log.
np_per_side : numpy.ndarray
Number of particles per simulation's box side.
The product of its components should be equal to ``total_num_ptcls``.
pre_run : bool
Flag for preprocessing phase.
"""
    def __init__(self, dic: dict = None) -> None:
        """Initialize every attribute to a safe default, then optionally
        overwrite from *dic* via ``self.from_dict`` (defined elsewhere in
        this class).

        Parameters
        ----------
        dic : dict, optional
            Dictionary of attribute name/value pairs to copy onto the
            instance.
        """
        # Particle loading / initialization options
        self.particles_input_file = None
        self.load_perturb = 0.0
        self.initial_lattice_config = "simple_cubic"
        self.load_rejection_radius = None
        self.load_halton_bases = None
        self.load_method = None
        self.potential_type = None
        self.units = None
        self.electron_magnetic_energy = None
        self.input_file = None
        # Sim box geometry
        self.Lx = 0.0
        self.Ly = 0.0
        self.Lz = 0.0
        self.LPx = 0.0
        self.LPy = 0.0
        self.LPz = 0.0
        self.e1 = None
        self.e2 = None
        self.e3 = None
        self.ep1 = None
        self.ep2 = None
        self.ep3 = None
        self.box_lengths = None
        self.pbox_lengths = None
        self.box_volume = 0.0
        self.pbox_volume = 0.0
        self.dimensions = 3
        # Physical Constants and conversion units (MKS; check_units() rescales
        # these in place for cgs)
        self.J2erg = 1.0e7  # erg/J
        self.eps0 = physical_constants["vacuum electric permittivity"][0]
        self.fourpie0 = 4.0 * pi * self.eps0
        self.mp = physical_constants["proton mass"][0]
        self.me = physical_constants["electron mass"][0]
        self.qe = physical_constants["elementary charge"][0]
        self.hbar = physical_constants["reduced Planck constant"][0]
        self.hbar2 = self.hbar**2
        self.c0 = physical_constants["speed of light in vacuum"][0]
        self.eV2K = physical_constants["electron volt-kelvin relationship"][0]
        self.eV2J = physical_constants["electron volt-joule relationship"][0]
        self.a0 = physical_constants["Bohr radius"][0]
        self.kB = physical_constants["Boltzmann constant"][0]
        self.kB_eV = physical_constants["Boltzmann constant in eV/K"][0]
        self.a_ws = 0.0
        # Phases
        self.equilibration_phase = True
        self.electrostatic_equilibration = True
        self.magnetization_phase = False
        self.production_phase = True
        # Timing
        self.equilibration_steps = 0
        self.production_steps = 0
        self.magnetization_steps = 0
        self.eq_dump_step = 1
        self.prod_dump_step = 1
        self.mag_dump_step = 1
        # Control
        self.job_id = None
        self.job_dir = None
        self.log_file = None
        self.measure = False
        self.magnetized = False
        self.plot_style = None
        self.pre_run = False
        self.simulations_dir = "Simulations"
        self.production_dir = "Production"
        self.magnetization_dir = "Magnetization"
        self.equilibration_dir = "Equilibration"
        self.preprocessing_dir = "PreProcessing"
        self.postprocessing_dir = "PostProcessing"
        self.prod_dump_dir = "dumps"
        self.eq_dump_dir = "dumps"
        self.mag_dump_dir = "dumps"
        self.verbose = True
        self.restart_step = None
        self.np_per_side = None
        # Per-species arrays; filled by create_species_arrays()
        self.num_species = 1
        self.magnetic_field = None
        self.species_lj_sigmas = None
        self.species_names = None
        self.species_num = None
        self.species_num_dens = None
        self.species_concentrations = None
        self.species_temperatures = None
        self.species_temperatures_eV = None
        self.species_masses = None
        self.species_charges = None
        self.species_plasma_frequencies = None
        self.species_cyclotron_frequencies = None
        self.species_couplings = None
        # Aggregate plasma quantities
        self.coupling_constant = 0.0
        self.total_num_density = 0.0
        self.total_num_ptcls = 0
        self.total_plasma_frequency = 0.0
        self.total_debye_length = 0.0
        self.total_mass_density = 0.0
        self.total_ion_temperature = 0.0
        self.T_desired = 0.0
        self.total_net_charge = 0.0
        self.QFactor = 0.0
        self.average_charge = None
        self.average_mass = None
        self.hydrodynamic_frequency = None
        if dic:
            self.from_dict(dic)
def __repr__(self):
sortedDict = dict(sorted(self.__dict__.items(), key=lambda x: x[0].lower()))
disp = "Parameters( \n"
for key, value in sortedDict.items():
disp += "\t{} : {}\n".format(key, value)
disp += ")"
return disp
def __copy__(self):
"""Make a shallow copy of the object using copy by creating a new instance of the object and copying its __dict__."""
# Create a new object
_copy = type(self)(dic=self.__dict__)
return _copy
def __deepcopy__(self, memodict={}):
"""
Make a deepcopy of the object.
Parameters
----------
memodict: dict
Dictionary of id's to copies
Returns
-------
_copy: :class:`sarkas.core.Parameters`
A new Parameters class.
"""
id_self = id(self) # memorization avoids unnecessary recursion
_copy = memodict.get(id_self)
if _copy is None:
_copy = type(self)()
# Make a deepcopy of the mutable arrays using numpy copy function
for k, v in self.__dict__.items():
_copy.__dict__[k] = deepcopy(v, memodict)
return _copy
def calc_coupling_constant(self, species: list):
"""
Calculate the coupling constant of each species and the total coupling constant. For more information see
the theory pages.
Parameters
----------
species: list
List of ``sarkas.plasma.Species`` objects.
"""
z_avg = (self.species_charges.transpose()) @ self.species_concentrations
for i, sp in enumerate(species):
const = self.fourpie0 * self.kB
sp.calc_coupling(self.a_ws, z_avg, const)
self.species_couplings[i] = sp.coupling
self.coupling_constant += sp.concentration * sp.coupling
def calc_electron_properties(self, species: list):
"""Check whether the electrons are a dynamical species or not."""
# Check for electrons as dynamical species
if "e" not in self.species_names:
electrons = {
"name": "electron_background",
"number_density": (
self.species_charges.transpose() @ self.species_concentrations * self.total_num_density / self.qe
),
}
if hasattr(self, "electron_temperature_eV"):
electrons["temperature_eV"] = self.electron_temperature_eV
electrons["temperature"] = self.eV2K * self.electron_temperature_eV
elif hasattr(self, "electron_temperature"):
electrons["temperature"] = self.electron_temperature
electrons["temperature_eV"] = self.electron_temperature / self.eV2K
else:
electrons["temperature"] = self.total_ion_temperature
electrons["temperature_eV"] = self.total_ion_temperature / self.eV2K
electrons["mass"] = self.me
electrons["Z"] = -1.0
electrons["charge"] = electrons["Z"] * self.qe
electrons["spin_degeneracy"] = 2.0
e_species = Species(electrons)
e_species.copy_params(self)
e_species.calc_ws_radius()
e_species.calc_plasma_frequency()
e_species.calc_debye_length()
e_species.calc_landau_length()
# Electron should be the last species if not dynamical
species.append(e_species)
else:
# Electron should be the first species if dynamical
e_species = species[0]
e_species.calc_debroglie_wavelength()
e_species.calc_quantum_attributes(spin_statistics="fermi-dirac")
# Electron WS radius
e_species.a_ws = (3.0 / (4.0 * pi * e_species.number_density)) ** (1.0 / 3.0)
# Brueckner parameters
e_species.rs = e_species.a_ws / self.a0
# Other electron parameters
e_species.degeneracy_parameter = self.kB * e_species.temperature / e_species.Fermi_energy
e_species.relativistic_parameter = self.hbar * e_species.Fermi_wavenumber / (self.me * self.c0)
# Eq. 1 in Murillo Phys Rev E 81 036403 (2010)
e_species.coupling = e_species.charge**2 / (
self.fourpie0 * e_species.Fermi_energy * e_species.a_ws * sqrt(1.0 + e_species.degeneracy_parameter**2)
)
# Warm Dense Matter Parameter, Eq.3 in Murillo Phys Rev E 81 036403 (2010)
e_species.wdm_parameter = 2.0 / (e_species.degeneracy_parameter + 1.0 / e_species.degeneracy_parameter)
e_species.wdm_parameter *= 2.0 / (e_species.coupling + 1.0 / e_species.coupling)
if self.magnetized:
b_mag = norm(self.magnetic_field) # magnitude of B
if self.units == "cgs":
e_species.cyclotron_frequency = self.qe * b_mag / self.c0 / self.me
else:
e_species.cyclotron_frequency = self.qe * b_mag / self.me
# Inverse temperature for convenience
beta_e = 1.0 / (self.kB * e_species.temperature)
e_species.magnetic_energy = self.hbar * e_species.cyclotron_frequency
tan_arg = 0.5 * self.hbar * e_species.cyclotron_frequency * beta_e
# Perpendicular correction
e_species.horing_perp_correction = (e_species.plasma_frequency / e_species.cyclotron_frequency) ** 2
e_species.horing_perp_correction *= 1.0 - tan_arg / tanh(tan_arg)
e_species.horing_perp_correction += 1
# Parallel correction
e_species.horing_par_correction = 1 - (self.hbar * beta_e * e_species.plasma_frequency) ** 2 / 12.0
# Quantum Anisotropy Parameter
e_species.horing_delta = self.horing_perp_correction - 1
e_species.horing_delta += (self.hbar * beta_e * e_species.cyclotron_frequency) ** 2 / 12
e_species.horing_delta /= self.horing_par_correction
    def calc_parameters(self, species: list):
        """
        Assign the parsed parameters.

        Orchestration only: delegates to ``set_species_attributes``,
        ``create_species_arrays`` and ``sim_box_setup`` (defined elsewhere in
        this class), with the magnetic-field setup in between when
        ``magnetized`` is set.

        Parameters
        ----------
        species : list
            List of :class:`sarkas.plasma.Species` .
        """
        self.set_species_attributes(species)
        self.create_species_arrays(species)
        if self.magnetized:
            # Coerce the (possibly list-valued) input field to a float array.
            self.magnetic_field = array(self.magnetic_field, dtype=float)
            self.calc_magnetic_parameters(species)
        self.sim_box_setup()
def calc_magnetic_parameters(self, species: list):
"""
Calculate cyclotron frequency in case of a magnetized simulation.
Parameters
----------
species: list,
List of :class:`sarkas.plasma.Species`.
"""
self.species_cyclotron_frequencies = zeros(self.num_species)
for i, sp in enumerate(species):
if self.units == "cgs":
sp.calc_cyclotron_frequency(norm(self.magnetic_field) / self.c0)
else:
sp.calc_cyclotron_frequency(norm(self.magnetic_field))
sp.beta_c = sp.cyclotron_frequency / sp.plasma_frequency
self.species_cyclotron_frequencies[i] = sp.cyclotron_frequency
def check_units(self) -> None:
"""Adjust default physical constants for cgs unit system and check for LJ potential."""
# Physical constants
if self.units == "cgs":
self.kB *= self.J2erg
self.c0 *= 1e2 # cm/s
self.mp *= 1e3
# Coulomb to statCoulomb conversion factor. See https://en.wikipedia.org/wiki/Statcoulomb
C2statC = 1.0e-01 * self.c0
self.hbar = self.J2erg * self.hbar
self.hbar2 = self.hbar**2
self.qe *= C2statC
self.me *= 1.0e3
self.eps0 = 1.0
self.fourpie0 = 1.0
self.a0 *= 1e2
if self.potential_type == "lj":
self.fourpie0 = 1.0
self.species_lj_sigmas = zeros(self.num_species)
def create_species_arrays(self, species: list):
"""
Get species information into arrays for the postprocessing part.
Parameters
----------
species : list
List of :class:`sarkas.plasma.Species` .
"""
self.num_species = len(species)
# Initialize the arrays containing species attributes. This is needed for postprocessing
self.species_names = []
self.species_num = zeros(self.num_species, dtype=int)
self.species_num_dens = zeros(self.num_species)
self.species_concentrations = zeros(self.num_species)
self.species_temperatures = zeros(self.num_species)
self.species_temperatures_eV = zeros(self.num_species)
self.species_masses = | zeros(self.num_species) | numpy.zeros |
#!/usr/bin/env python
# builtin modules
import os
# dependencies
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Wedge
from uncertainties import unumpy
from dtk import control
# local module imports
from . import bicycle
from . import inertia
from . import com
from . import io
from . import geometry
from . import period
from . import rider
#from plot import plot_eigenvalues
class Bicycle(object):
"""
An object for a bicycle. A bicycle has parameters and can have a rider
attached to it. That's about it for now.
"""
def __new__(cls, bicycleName, pathToData='.', forceRawCalc=False,
forcePeriodCalc=False):
'''Returns a NoneType object if there is no directory for the bicycle.'''
# is there a data directory for this bicycle? if not, tell the user to
# put some data in the folder so we have something to work with!
try:
pathToBicycle = os.path.join(pathToData, 'bicycles', bicycleName)
if os.path.isdir(pathToBicycle) == True:
print("We have foundeth a directory named: " +
"{0}.".format(pathToBicycle))
return super(Bicycle, cls).__new__(cls)
else:
raise ValueError
except:
mes = """Are you nuts?! Make a directory called '{0}' with basic data
for your bicycle in this directory: '{1}'. Then I can actually
create a bicycle object. You may either need to change to the
correct directory or reset the pathToData argument.""".format(bicycleName, pathToData)
print(mes)
return None
def __init__(self, bicycleName, pathToData='.', forceRawCalc=False,
forcePeriodCalc=False):
"""
Creates a bicycle object and sets the parameters based on the available
data.
Parameters
----------
bicycleName : string
The short name of your bicicleta. It should be one word with the
first letter capitalized and all other letters lower case. You
should have a matching directory under `<pathToData>/bicycles/`.
For example: `<pathToData>/bicycles/Shortname`.
pathToData : string
This is the path to the folder where the bicycle/rider parameters
and raw data are stored. The default is the current working
directory.
forceRawCalc : boolean
Forces a recalculation of the benchmark parameters from the measured
parameters. Otherwise it will only run the calculation if there is
no benchmark parameter file.
forcePeriodCalc : boolean
Forces a recalculation of the periods from the oscillation data.
Notes
-----
Bicycles are assumed not to have a rider when initially loaded.
"""
self.bicycleName = bicycleName
pathToBicycles = os.path.join(pathToData, 'bicycles')
# the directory where the files for this bicycle are stored
self.directory = os.path.join(pathToBicycles, bicycleName)
# bicycles are assumed not to have a rider when initially loaded
self.hasRider = False
self.riderPar = {}
self.human = None
self.parameters = {}
# if there are some parameter files, then load them
if 'Parameters' in os.listdir(self.directory):
parDir = os.path.join(self.directory, 'Parameters')
parFiles = os.listdir(parDir)
for parFile in parFiles:
# remove the extension
fname = os.path.splitext(parFile)[0]
# get the bike and the parameter set type
bike, ptype = io.space_out_camel_case(fname, output='list')
# load the parameters
pathToFile = os.path.join(parDir, parFile)
self.parameters[ptype] = io.load_parameter_text_file(pathToFile)
# this is where the raw data files from the pendulum oscillations are
# stored
rawDataDir = os.path.join(self.directory, 'RawData')
# it would be more robust to see if there are enough files in the
# RawData directory, but that isn't implemented yet. For now you'll
# just get and error sometime down the road when a period for the
# missing files is needed.
isRawDataDir = 'RawData' in os.listdir(self.directory)
if isRawDataDir:
print("Found the RawData directory:", rawDataDir)
isMeasuredFile = bicycleName + 'Measured.txt' in os.listdir(rawDataDir)
else:
isMeasuredFile = False
isBenchmark = 'Benchmark' in self.parameters.keys()
# the user wants to force a recalc and the data is there
conOne = forceRawCalc and isRawDataDir and isMeasuredFile
# the user doesn't want to force a recalc and there are no benchmark
# parameters
conTwo = not forceRawCalc and not isBenchmark
if conOne or conTwo:
print("Recalcuting the parameters.")
par, extras = self.calculate_from_measured(
forcePeriodCalc=forcePeriodCalc)
self.parameters['Benchmark'] = par
self.extras = extras
print("The glory of the %s parameters are upon you!"
% self.bicycleName)
elif not forceRawCalc and isBenchmark:
# we already have what we need
stmt1 = "Looks like you've already got some parameters for %s, "
stmt2 = "use forceRawCalc to recalculate."
print((stmt1 + stmt2) % self.bicycleName)
# load the measured.txt file if it exists
pathToRawFile = os.path.join(rawDataDir,
self.bicycleName + 'Measured.txt')
try:
self.parameters['Measured'] = \
io.load_parameter_text_file(pathToRawFile)
except IOError:
pass
else:
print('''There is no data available. Create
bicycles/{sn}/Parameters/{sn}Benchmark.txt and/or fill
bicycle/{sn}/RawData/ with pendulum data mat files and the
{sn}Measured.txt file'''.format(sn=bicycleName))
def __str__(self):
if self.hasRider:
desc = "{0} with {1} on board.".format(self.bicycleName,
self.riderName)
else:
desc = "{0} with no one on board.".format(self.bicycleName)
return desc
def save_parameters(self, filetype='text'):
"""
Saves all the parameter sets to file.
Parameters
----------
filetype : string, optional
- 'text' : a text file with parameters as `c = 0.10+/-0.01\n`
- 'matlab' : matlab .mat file
- 'pickle' : python pickled dictionary
"""
if self.hasRider:
pathToData = os.path.split(os.path.split(self.directory)[0])[0]
pathToParDir = os.path.join(pathToData, 'riders', self.riderName,
'Parameters')
pathToCombDir = os.path.join(pathToParDir, 'Combined')
if not os.path.exists(pathToCombDir):
os.makedirs(pathToCombDir)
fileName = self.riderName + self.bicycleName
# don't resave the measured parameters
psets = [x for x in self.riderPar.keys() if x != 'Measured']
parameters = self.riderPar
print(('This bicycle has a rider, {0}, so the data will be ' +
'saved here: {1}').format(self.riderName, pathToParDir))
else:
pathToParDir = os.path.join(self.directory, 'Parameters')
fileName = self.bicycleName
# don't resave the measured parameters
psets = [x for x in self.parameters.keys() if x != 'Measured']
parameters = self.parameters
print(('This bicycle has no rider so the data will be ' +
'saved here: {0}').format(pathToParDir))
if filetype == 'text':
for pset in psets:
fileName = fileName + pset + '.txt'
pathToTxtFile = os.path.join(pathToParDir, fileName)
io.write_parameter_text_file(pathToTxtFile, parameters[pset])
if self.hasRider:
pathToCombFile = os.path.join(pathToCombDir, fileName)
io.write_parameter_text_file(pathToCombFile,
self.parameters[pset])
elif filetype == 'matlab':
# this should handle the uncertainties properly
raise NotImplementedError("Doesn't work yet.")
elif filetype == 'pickle':
raise NotImplementedError("Doesn't work yet.")
def show_pendulum_photos(self):
"""
Opens up the pendulum photos in eye of gnome for inspection.
This only works in Linux and if eog is installed. Maybe check pythons
xdg-mime model for having this work cross platform.
"""
photoDir = os.path.join(self.directory, 'Photos')
try:
if os.path.isdir(photoDir):
os.system('eog ' + os.path.join(photoDir, '*.*'))
else:
print("There are no photos of your bicycle.")
except:
raise NotImplementedError("This works only works for linux with " +
"Eye of Gnome installed.")
    def steer_assembly_moment_of_inertia(self, handlebar=True, fork=True,
            wheel=True, aboutSteerAxis=False, nominal=False):
        """
        Returns the inertia tensor of the steer assembly with respect to a
        reference frame aligned with the steer axis.

        Parameters
        ----------
        handlebar : boolean, optional
            If true the handlebar will be included in the calculation.
        fork : boolean, optional
            If true the fork will be included in the calculation.
        wheel : boolean, optional
            If true then the wheel will be included in the calculation.
        aboutSteerAxis : boolean, optional
            If true the inertia tensor will be with respect to a point made
            from the projection of the center of mass onto the steer axis.
        nominal : boolean, optional
            If true the nominal values will be returned instead of a uarray.

        Returns
        -------
        iAss : ndarray, shape(3, 3)
            Inertia tensor of the specified steer assembly parts with respect
            to a reference frame aligned with the steer axis.

        Notes
        -----
        The 3 component is aligned with the steer axis (pointing downward), the
        1 component is perpendicular to the steer axis (pointing forward) and
        the 2 component is perpendicular to the steer axis (pointing to the
        right).

        This function does not currently take into account the flywheel, D, if
        it is defined, beware.
        """
        # load in the Benchmark parameter set
        par = self.parameters['Benchmark']
        if 'mD' in par.keys():
            print("You have a flywheel defined. Beware that it is ignored in "
                + "the calculations and the results do not reflect that it is "
                + "there.")
        # there should always be either an H (handlebar/fork) and sometimes
        # there is a G (handlebar) and S (fork) if the fork and handlebar were
        # measured separately
        try:
            if fork and handlebar:
                # handlebar/fork
                I = inertia.part_inertia_tensor(par, 'H')
                m = par['mH']
                x = par['xH']
                z = par['zH']
            elif fork and not handlebar:
                # fork alone
                I = inertia.part_inertia_tensor(par, 'S')
                m = par['mS']
                x = par['xS']
                z = par['zS']
            elif handlebar and not fork:
                # handlebar alone
                I = inertia.part_inertia_tensor(par, 'G')
                m = par['mG']
                x = par['xG']
                z = par['zG']
            else:
                # if neither set to zero
                I = np.zeros((3, 3))
                m = 0.
                x = 0.
                z = 0.
        except KeyError:
            # missing mS/xS/... keys mean G and S were never measured apart
            raise ValueError("The fork and handlebar were not measured " +
                             "separately for this bicycle." +
                             " Try making both the fork and handlebar either" +
                             " both True or both False.")
        if wheel:
            # list the mass and com of the handlebar/assembly and the front
            # wheel
            masses = np.array([m, par['mF']])
            coords = np.array([[x, par['w']],
                               [0., 0.],
                               [z, -par['rF']]])
            # mass and com of the entire assembly
            mAss, cAss = com.total_com(coords, masses)
            # front wheel inertia in the benchmark reference frame about the
            # com
            IF = inertia.part_inertia_tensor(par, 'F')
            # distance from the fork/handlebar assembly (without wheel) to the
            # new center of mass for the assembly with the wheel
            d = np.array([x - cAss[0], 0., z - cAss[2]])
            # distance from the front wheel center to the new center of mass
            # for the assembly with the wheel
            dF = np.array([par['w'] - cAss[0],
                           0.,
                           -par['rF'] - cAss[2]])
            # this is the inertia of the assembly about the com with reference
            # to the benchmark bicycle reference frame
            iAss = (inertia.parallel_axis(I, m, d) +
                    inertia.parallel_axis(IF, par['mF'], dF))
            # this is the inertia of the assembly about a reference frame aligned with
            # the steer axis and through the center of mass
            iAssRot = inertia.rotate_inertia_tensor(iAss, par['lam'])
        else: # don't add the wheel
            mAss = m
            cAss = np.array([x, 0., z])
            iAssRot = inertia.rotate_inertia_tensor(I, par['lam'])
        if aboutSteerAxis:
            # this is the distance from the assembly com to the steer axis
            distance = geometry.distance_to_steer_axis(par['w'], par['c'],
                                                       par['lam'], cAss)
            print("handlebar cg distance", distance)
            # now calculate the inertia about the steer axis of the rotated frame
            iAss = inertia.parallel_axis(iAssRot, mAss, np.array([distance, 0., 0.]))
        else:
            iAss = iAssRot
        if nominal:
            return unumpy.nominal_values(iAss)
        else:
            return iAss
def calculate_from_measured(self, forcePeriodCalc=False):
'''Calculates the parameters from measured data.'''
rawDataDir = os.path.join(self.directory, 'RawData')
pathToRawFile = os.path.join(rawDataDir, self.bicycleName + 'Measured.txt')
# load the measured parameters
self.parameters['Measured'] = io.load_parameter_text_file(pathToRawFile)
forkIsSplit = is_fork_split(self.parameters['Measured'])
# if the the user doesn't specifiy to force period calculation, then
# see if enough data is actually available in the *Measured.txt file to
# do the calculations
if not forcePeriodCalc:
forcePeriodCalc = period.check_for_period(self.parameters['Measured'],
forkIsSplit)
if forcePeriodCalc == True:
# get the list of mat files associated with this bike
matFiles = [x for x in os.listdir(rawDataDir)
if x.endswith('.mat')]
matFiles.sort()
# calculate the period for each file for this bicycle
periods = period.calc_periods_for_files(rawDataDir, matFiles, forkIsSplit)
# add the periods to the measured parameters
self.parameters['Measured'].update(periods)
io.write_periods_to_file(pathToRawFile, periods)
return calculate_benchmark_from_measured(self.parameters['Measured'])
    def add_rider(self, riderName, reCalc=False, draw=False):
        """
        Adds the inertial effects of a rigid rider to the bicycle.

        Parameters
        ----------
        riderName : string
            A rider name that corresponds to a folder in
            `<pathToData>/riders/`.
        reCalc : boolean, optional
            If true, the rider parameters will be recalculated.
        draw : boolean, optional
            If true, visual python will be used to draw a three dimensional
            image of the rider.
        """
        # can't draw the rider model without the human object
        if draw:
            reCalc=True
        # first check to see if a rider has already been added
        if self.hasRider == True:
            print(("D'oh! This bicycle already has {0} as a " +
                   "rider!").format(self.riderName))
        else:
            print("There is no rider on the bicycle, now adding " +
                  "{0}.".format(riderName))
            pathToData = os.path.split(os.path.split(self.directory)[0])[0]
            # get the path to the rider's folder
            pathToRider = os.path.join(pathToData, 'riders', riderName)
            # load in the parameters
            bicyclePar = self.parameters['Benchmark']
            bicycleName = self.bicycleName
            if reCalc == True:
                print("Calculating the human configuration.")
                # run the calculations
                try:
                    measuredPar = self.parameters['Measured']
                except KeyError:
                    print('The measured bicycle parameters need to be ' +
                          'available, create your bicycle such that they ' +
                          'are available.')
                    raise
                riderPar, human, bicycleRiderPar =\
                    rider.configure_rider(pathToRider, bicycleName, bicyclePar,
                                          measuredPar, draw)
            else:
                pathToParFile = os.path.join(pathToRider, 'Parameters',
                    riderName + self.bicycleName + 'Benchmark.txt')
                try:
                    # load the parameter file
                    riderPar = io.load_parameter_text_file(pathToParFile)
                except IOError:
                    # file doesn't exist so run the calculations
                    print("No parameter files found, calculating the human " +
                          "configuration.")
                    try:
                        measuredPar = self.parameters['Measured']
                    except KeyError:
                        print('The measured bicycle parameters need to be ' +
                              'available, create your bicycle such that they ' +
                              'are available.')
                        raise
                    riderPar, human, bicycleRiderPar =\
                        rider.configure_rider(pathToRider, bicycleName,
                                              bicyclePar, measuredPar, draw)
                else:
                    print("Loaded the precalculated parameters from " +
                          "{0}".format(pathToParFile))
            # NOTE(review): this recombines even when configure_rider already
            # returned bicycleRiderPar above; upstream keeps this line inside
            # the try/else — verify the intended placement.
            bicycleRiderPar = inertia.combine_bike_rider(bicyclePar, riderPar)
            # set the attributes
            self.riderPar['Benchmark'] = riderPar
            try:
                # ``human`` is only bound when configure_rider ran; when the
                # precalculated file was loaded it is undefined, so this
                # NameError path deliberately leaves self.human as None.
                self.human = human
            except NameError:
                self.human = None
            self.parameters['Benchmark'] = bicycleRiderPar
            self.riderName = riderName
            self.hasRider = True
def plot_bicycle_geometry(self, show=True, pendulum=True,
centerOfMass=True, inertiaEllipse=True):
'''Returns a figure showing the basic bicycle geometry, the centers of
mass and the moments of inertia.
Notes
-----
If the flywheel is defined, it's center of mass corresponds to the
front wheel and is not depicted in the plot.
'''
par = io.remove_uncertainties(self.parameters['Benchmark'])
parts = get_parts_in_parameters(par)
try:
slopes = io.remove_uncertainties(self.extras['slopes'])
intercepts = io.remove_uncertainties(self.extras['intercepts'])
penInertias = io.remove_uncertainties(self.extras['pendulumInertias'])
except AttributeError:
pendulum = False
fig = plt.figure()
ax = plt.axes()
# define some colors for the parts
numColors = len(parts)
cmap = plt.get_cmap('gist_rainbow')
partColors = {}
for i, part in enumerate(parts):
partColors[part] = cmap(1. * i / numColors)
if inertiaEllipse:
# plot the principal moments of inertia
for j, part in enumerate(parts):
I = inertia.part_inertia_tensor(par, part)
Ip, C = inertia.principal_axes(I)
if part == 'R':
center = np.array([0., par['rR']])
elif part in 'FD':
center = np.array([par['w'], par['rF']])
else:
center = np.array([par['x' + part], -par['z' + part]])
# which row in C is the y vector
uy = np.array([0., 1., 0.])
for i, row in enumerate(C):
if np.abs(np.sum(row - uy)) < 1E-10:
yrow = i
# remove the row for the y vector
Ip2D = np.delete(Ip, yrow, 0)
# remove the column and row associated with the y
C2D = np.delete(np.delete(C, yrow, 0), 1, 1)
# make an ellipse
Imin = Ip2D[0]
Imax = Ip2D[1]
# get width and height of a ellipse with the major axis equal
# to one
unitWidth = 1. / 2. / np.sqrt(Imin) * np.sqrt(Imin)
unitHeight = 1. / 2. / np.sqrt(Imax) * np.sqrt(Imin)
# now scaled the width and height relative to the maximum
# principal moment of inertia
width = Imax * unitWidth
height = Imax * unitHeight
angle = -np.degrees(np.arccos(C2D[0, 0]))
ellipse = Ellipse((center[0], center[1]), width, height,
angle=angle, fill=False,
color=partColors[part], alpha=0.25)
ax.add_patch(ellipse)
# plot the ground line
x = np.array([-par['rR'],
par['w'] + par['rF']])
plt.plot(x, np.zeros_like(x), 'k')
# plot the rear wheel
c = plt.Circle((0., par['rR']), radius=par['rR'], fill=False)
ax.add_patch(c)
# plot the front wheel
c = plt.Circle((par['w'], par['rF']), radius=par['rF'], fill=False)
ax.add_patch(c)
# plot the fundamental bike
deex, deez = geometry.fundamental_geometry_plot_data(par)
plt.plot(deex, -deez, 'k')
# plot the steer axis
dx3 = deex[2] + deez[2] * (deex[2] - deex[1]) / (deez[1] - deez[2])
plt.plot([deex[2], dx3], [-deez[2], 0.], 'k--')
# don't plot the pendulum lines if a rider has been added because the
# inertia has changed
if self.hasRider:
pendulum = False
if pendulum:
# plot the pendulum axes for the measured parts
for j, pair in enumerate(slopes.items()):
part, slopeSet = pair
xcom, zcom = par['x' + part], par['z' + part]
for i, m in enumerate(slopeSet):
b = intercepts[part][i]
xPoint, zPoint = geometry.project_point_on_line((m, b),
(xcom, zcom))
comLineLength = penInertias[part][i]
xPlus = comLineLength / 2. * np.cos(np.arctan(m))
x = np.array([xPoint - xPlus,
xPoint + xPlus])
z = -m * x - b
plt.plot(x, z, color=partColors[part])
# label the pendulum lines with a number
plt.text(x[0], z[0], str(i + 1))
if centerOfMass:
# plot the center of mass location
def com_symbol(ax, center, radius, color='b'):
'''Returns axis with center of mass symbol.'''
c = plt.Circle(center, radius=radius, fill=False)
w1 = Wedge(center, radius, 0., 90.,
color=color, ec=None, alpha=0.5)
w2 = Wedge(center, radius, 180., 270.,
color=color, ec=None, alpha=0.5)
ax.add_patch(w1)
ax.add_patch(w2)
ax.add_patch(c)
return ax
# radius of the CoM symbol
sRad = 0.03
# front wheel CoM
ax = com_symbol(ax, (par['w'], par['rF']), sRad,
color=partColors['F'])
plt.text(par['w'] + sRad, par['rF'] + sRad, 'F')
# rear wheel CoM
ax = com_symbol(ax, (0., par['rR']), sRad,
color=partColors['R'])
plt.text(0. + sRad, par['rR'] + sRad, 'R')
for j, part in enumerate([x for x in parts
if x not in 'RFD']):
xcom = par['x' + part]
zcom = par['z' + part]
ax = com_symbol(ax, (xcom, -zcom), sRad,
color=partColors[part])
plt.text(xcom + sRad, -zcom + sRad, part)
if 'H' not in parts:
ax = com_symbol(ax, (par['xH'], -par['zH']), sRad)
plt.text(par['xH'] + sRad, -par['zH'] + sRad, 'H')
plt.axis('equal')
plt.ylim((0., 1.))
plt.title(self.bicycleName)
# if there is a rider on the bike, make a simple stick figure
if self.human:
human = self.human
mpar = self.parameters['Measured']
bpar = self.parameters['Benchmark']
# K2: lower leg, tip of foot to knee
start = rider.yeadon_vec_to_bicycle_vec(human.K2.end_pos, mpar, bpar)
end = rider.yeadon_vec_to_bicycle_vec(human.K2.pos, mpar, bpar)
plt.plot([start[0, 0], end[0, 0]],
[-start[2, 0], -end[2, 0]], 'k')
# K1: upper leg, knee to hip
start = rider.yeadon_vec_to_bicycle_vec(human.K2.pos, mpar, bpar)
end = rider.yeadon_vec_to_bicycle_vec(human.K1.pos, mpar, bpar)
plt.plot([start[0, 0], end[0, 0]],
[-start[2, 0], -end[2, 0]], 'k')
# torso
start = rider.yeadon_vec_to_bicycle_vec(human.K1.pos, mpar, bpar)
end = rider.yeadon_vec_to_bicycle_vec(human.B1.pos, mpar, bpar)
plt.plot([start[0, 0], end[0, 0]],
[-start[2, 0], -end[2, 0]], 'k')
# B1: upper arm
start = rider.yeadon_vec_to_bicycle_vec(human.B1.pos, mpar, bpar)
end = rider.yeadon_vec_to_bicycle_vec(human.B2.pos, mpar, bpar)
plt.plot([start[0, 0], end[0, 0]],
[-start[2, 0], -end[2, 0]], 'k')
# B2: lower arm, elbow to tip of fingers
start = rider.yeadon_vec_to_bicycle_vec(human.B2.pos, mpar, bpar)
end = rider.yeadon_vec_to_bicycle_vec(human.B2.end_pos, mpar, bpar)
plt.plot([start[0, 0], end[0, 0]],
[-start[2, 0], -end[2, 0]], 'k')
# C: chest/head
start = rider.yeadon_vec_to_bicycle_vec(human.B1.pos, mpar, bpar)
end = rider.yeadon_vec_to_bicycle_vec(human.C.end_pos, mpar, bpar)
plt.plot([start[0, 0], end[0, 0]],
[-start[2, 0], -end[2, 0]], 'k')
if show:
fig.show()
return fig
def canonical(self, nominal=False):
"""
Returns the canonical velocity and gravity independent matrices for
the Whipple bicycle model linearized about the nominal
configuration.
Parameters
----------
nominal : boolean, optional
The default is false and uarrays are returned with the
calculated uncertainties. If true ndarrays are returned without
uncertainties.
Returns
-------
M : uarray, shape(2,2)
Mass matrix.
C1 : uarray, shape(2,2)
Velocity independent damping matrix.
K0 : uarray, shape(2,2)
Gravity independent part of the stiffness matrix.
K2 : uarray, shape(2,2)
Velocity squared independent part of the stiffness matrix.
Notes
-----
The canonical matrices complete the following equation:
M * q'' + v * C1 * q' + [g * K0 + v**2 * K2] * q = f
where:
q = [phi, delta]
f = [Tphi, Tdelta]
phi
Bicycle roll angle.
delta
Steer angle.
Tphi
Roll torque.
Tdelta
Steer torque.
v
Bicylce speed.
If you have a flywheel defined, body D, it will completely be
ignored in these results. These results are strictly for the Whipple
bicycle model.
"""
par = self.parameters['Benchmark']
M, C1, K0, K2 = bicycle.benchmark_par_to_canonical(par)
if nominal is True:
return (unumpy.nominal_values(M),
unumpy.nominal_values(C1),
unumpy.nominal_values(K0),
unumpy.nominal_values(K2))
elif nominal is False:
return M, C1, K0, K2
else:
raise ValueError('nominal must be True or False')
def state_space(self, speed, nominal=False):
"""
Returns the A and B matrices for the Whipple model linearized about
the upright constant velocity configuration.
Parameters
----------
speed : float
The speed of the bicycle.
nominal : boolean, optional
The default is false and uarrays are returned with the calculated
uncertainties. If true ndarrays are returned without uncertainties.
Returns
-------
A : ndarray, shape(4,4)
The state matrix.
B : ndarray, shape(4,2)
The input matrix.
Notes
-----
``A`` and ``B`` describe the Whipple model in state space form:
x' = A * x + B * u
where
The states are [roll angle,
steer angle,
roll rate,
steer rate]
The inputs are [roll torque,
steer torque]
If you have a flywheel defined, body D, it will completely be ignored
in these results. These results are strictly for the Whipple bicycle
model.
"""
M, C1, K0, K2 = self.canonical()
g = self.parameters['Benchmark']['g']
A, B = bicycle.ab_matrix(M, C1, K0, K2, speed, g)
if nominal is True:
return (unumpy.nominal_values(A), unumpy.nominal_values(B))
elif nominal is False:
return A, B
else:
raise ValueError('nominal must be True or False')
def eig(self, speeds):
'''Returns the eigenvalues and eigenvectors of the Whipple bicycle
model linearized about the nominal configuration.
Parameters
----------
speeds : ndarray, shape (n,) or float
The speed at which to calculate the eigenvalues.
Returns
-------
evals : ndarray, shape (n, 4)
eigenvalues
evecs : ndarray, shape (n, 4, 4)
eigenvectors
Notes
-----
If you have a flywheel defined, body D, it will completely be ignored
in these results. These results are strictly for the Whipple bicycle
model.
'''
# this allows you to enter a float
try:
speeds.shape
except AttributeError:
speeds = np.array([speeds])
par = io.remove_uncertainties(self.parameters['Benchmark'])
M, C1, K0, K2 = bicycle.benchmark_par_to_canonical(par)
m, n = 4, speeds.shape[0]
evals = np.zeros((n, m), dtype='complex128')
evecs = np.zeros((n, m, m), dtype='complex128')
for i, speed in enumerate(speeds):
A, B = bicycle.ab_matrix(M, C1, K0, K2, speed, par['g'])
w, v = np.linalg.eig(A)
evals[i] = w
evecs[i] = v
return evals, evecs
def plot_eigenvalues_vs_speed(self, speeds, fig=None, generic=False,
color='black', show=False, largest=False,
linestyle='-'):
"""Returns a plot of the eigenvalues versus speed for the current
benchmark parameters.
Parameters
----------
speeds : ndarray, shape(n,)
An array of speeds to calculate the eigenvalues at.
fig : matplotlib figure, optional
A figure to plot to.
generic : boolean
If true the lines will all be the same color and the modes will
not be labeled.
color : matplotlib color
If generic is true this will be the color of the plot lines.
largest : boolean
If true, only the largest eigenvalue is plotted.
Notes
-----
If you have a flywheel defined, body D, it will completely be
ignored in these results. These results are strictly for the Whipple
bicycle model.
"""
# sort the speeds in case they aren't
speeds = np.sort(speeds)
# figure properties
figwidth = 6. # in inches
goldenMean = (np.sqrt(5.0) - 1.0) / 2.0
figsize = [figwidth, figwidth * goldenMean]
params = {#'backend': 'ps',
'axes.labelsize': 8,
'text.fontsize': 10,
'legend.fontsize': 8,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'figure.figsize': figsize
}
plt.rcParams.update(params)
if not fig:
fig = plt.figure(figsize=figsize)
plt.axes([0.125, 0.2, 0.95 - 0.125, 0.85 - 0.2])
evals, evecs = self.eig(speeds)
if largest:
generic = True
if generic:
weaveColor = color
capsizeColor = color
casterColor = color
legend = ['_nolegend_'] * 6
legend[5] = self.bicycleName
maxLabel = self.bicycleName
else:
weaveColor = 'blue'
capsizeColor = 'red'
casterColor = 'green'
legend = ['Imaginary Weave', 'Imaginary Capsize',
'Imaginary Caster', 'Real Weave', 'Real Capsize',
'Real Caster']
maxLabel = 'Max Eigenvalue'
if largest:
maxEval = np.max(np.real(evals), axis=1)
plt.plot(speeds, maxEval, color=color, label=maxLabel,
linestyle=linestyle, linewidth=1.5)
# x axis line
plt.plot(speeds, np.zeros_like(speeds), 'k-',
label='_nolegend_', linewidth=1.5)
plt.ylim((np.min(maxEval), np.max(maxEval)))
plt.ylabel('Real Part of the Largest Eigenvalue [1/s]')
else:
wea, cap, cas = bicycle.sort_modes(evals, evecs)
# imaginary components
plt.plot(speeds, np.abs(np.imag(wea['evals'])), color=weaveColor,
label=legend[0], linestyle='--')
plt.plot(speeds, np.abs(np.imag(cap['evals'])), color=capsizeColor,
label=legend[1], linestyle='--')
plt.plot(speeds, np.abs(np.imag(cas['evals'])), color=casterColor,
label=legend[2], linestyle='--')
# x axis line
plt.plot(speeds, np.zeros_like(speeds), 'k-',
label='_nolegend_', linewidth=1.5)
# plot the real parts of the eigenvalues
plt.plot(speeds, np.real(wea['evals']),
color=weaveColor, label=legend[3])
plt.plot(speeds, np.real(cap['evals']),
color=capsizeColor, label=legend[4])
plt.plot(speeds, np.real(cas['evals']),
color=casterColor, label=legend[5])
# set labels and limits
plt.ylim((np.min(np.real(evals)),
np.max(np.imag(evals))))
plt.ylabel('Real and Imaginary Parts of the Eigenvalue [1/s]')
plt.xlim((speeds[0], speeds[-1]))
plt.xlabel('Speed [m/s]')
if generic:
plt.title('Eigenvalues vs Speed')
else:
plt.title('%s\nEigenvalues vs Speed' % self.bicycleName)
plt.legend()
if show:
plt.show()
return fig
def plot_bode(self, speed, u, y, **kwargs):
"""Returns a Bode plot.
Parameters
----------
speed : float
The speed at which to evaluate the system.
u : integer
An integer between 0 and 1 corresponding to the inputs roll torque
and steer torque.
y : integer
An integer between 0 and 3 corresponding to the inputs roll angle
steer angle, roll rate, steer rate.
kwargs : keyword pairs
Any options that can be passed to dtk.bode.
Returns
-------
mag : ndarray, shape(1000,)
The magnitude in dB of the frequency reponse.
phase : ndarray, shape(1000,)
The phase in degress of the frequency response.
fig : matplotlib figure
The Bode plot.
"""
A, B = self.state_space(speed, nominal=True)
C = np.eye(A.shape[0])
D = np.zeros_like(B)
w = np.logspace(0, 2, 1000)
outputNames = ['Roll Angle', 'Steer Angle', 'Roll Rate', 'Steer Rate']
inputNames = ['Roll Torque', 'Steer Torque']
if 'title' not in kwargs.keys():
kwargs['title'] = inputNames[u] + ' to ' + outputNames[y]
bode = control.bode((A, B[:, u], C[y, :], D[y, u]), w, **kwargs)
return bode
def compare_bode_speeds(self, speeds, u, y, fig=None):
"""Returns a figure with the Bode plots of multiple bicycles.
Parameters
----------
speeds : list
A list of speeds at which to evaluate the system.
u : integer
An integer between 0 and 1 corresponding to the inputs roll torque
and steer torque.
y : integer
An integer between 0 and 3 corresponding to the inputs roll angle,
steer angle, roll rate, steer rate.
Returns
-------
fig : matplotlib.Figure instance
The Bode plot.
Notes
-----
The phases are matched around zero degrees at with respect to the first
frequency.
"""
if fig is None:
fig = plt.figure()
for speed in speeds:
self.plot_bode(speed, u, y, label=str(speed) + ' m/s', fig=fig)
# take care of phase misalignment
phaseLines = fig.ax2.lines
for line in phaseLines:
firstValue = line.get_ydata()[0]
n = np.ceil(np.floor(abs(firstValue / 180.)) / 2.)
line.set_ydata(line.get_ydata() - np.sign(firstValue) * n * 360.)
return fig
def get_parts_in_parameters(par):
    '''Returns a list of parts in a parameter dictionary.

    Parameters
    ----------
    par : dictionary
        Benchmark bicycle parameters.

    Returns
    -------
    parts : list
        Unique list of parts that contain one or more of 'H', 'B', 'F', 'R',
        'S', 'G', 'D'.

    '''
    # every part has a mass entry ('mB', 'mF', ...), so the part letter is
    # the second character of each mass key
    return [key[1] for key in par if key.startswith('m')]
def calculate_benchmark_from_measured(mp):
'''Returns the benchmark (Meijaard 2007) parameter set based on the
measured data.
Parameters
----------
mp : dictionary
Complete set of measured data.
Returns
-------
par : dictionary
Benchmark bicycle parameter set.
'''
forkIsSplit = is_fork_split(mp)
par = {}
# calculate the wheelbase, steer axis tilt and trail
par = geometry.calculate_benchmark_geometry(mp, par)
# masses
par['mB'] = mp['mB']
par['mF'] = mp['mF']
par['mR'] = mp['mR']
try:
# we measured the mass of the flywheel plus the mass of the front
# wheel, mp['mD'], so to get the actual mass of the flywheel, subtract
# the mass of the front wheel
par['mD'] = mp['mD'] - mp['mF']
except KeyError:
pass
if forkIsSplit:
par['mS'] = mp['mS']
par['mG'] = mp['mG']
else:
par['mH'] = mp['mH']
# get the slopes, intercepts and betas for each part
slopes, intercepts, betas = com.part_com_lines(mp, par, forkIsSplit)
# calculate the centers of mass
for part in slopes.keys():
par['x' + part], par['z' + part] = com.center_of_mass(slopes[part],
intercepts[part])
# find the center of mass of the handlebar/fork assembly if the fork was
# split
if forkIsSplit:
coordinates = np.array([[par['xS'], par['xG']],
[0., 0.],
[par['zS'], par['zG']]])
masses = np.array([par['mS'], par['mG']])
mH, cH = inertia.total_com(coordinates, masses)
par['mH'] = mH
par['xH'] = cH[0]
par['zH'] = cH[2]
# local accelation due to gravity
par['g'] = mp['g']
# calculate the wheel y inertias
par['IFyy'] = inertia.compound_pendulum_inertia(mp['mF'], mp['g'],
mp['lF'], mp['TcF1'])
par['IRyy'] = inertia.compound_pendulum_inertia(mp['mR'], mp['g'],
mp['lR'], mp['TcR1'])
try:
# we measured the inertia of the front wheel with the flywheel inside
iFlywheelPlusFwheel = inertia.compound_pendulum_inertia(mp['mD'], mp['g'], mp['lF'], mp['TcD1'])
par['IDyy'] = iFlywheelPlusFwheel - par['IFyy']
except KeyError:
pass
# calculate the y inertias for the frame and fork
lB = (par['xB']**2 + (par['zB'] + par['rR'])**2)**(0.5)
par['IByy'] = inertia.compound_pendulum_inertia(mp['mB'], mp['g'], lB,
mp['TcB1'])
if forkIsSplit:
# fork
lS = ((par['xS'] - par['w'])**2 +
(par['zS'] + par['rF'])**2)**(0.5)
par['ISyy'] = inertia.compound_pendulum_inertia(mp['mS'], mp['g'],
lS, mp['TcS1'])
# handlebar
l1, l2 = geometry.calculate_l1_l2(mp['h6'], mp['h7'],
mp['d5'], mp['d6'], mp['l'])
u1, u2 = geometry.fwheel_to_handlebar_ref(par['lam'], l1, l2)
lG = ((par['xG'] - par['w'] + u1)**2 +
(par['zG'] + par['rF'] + u2)**2)**(.5)
par['IGyy'] = inertia.compound_pendulum_inertia(mp['mG'], mp['g'],
lG, mp['TcG1'])
else:
lH = ((par['xH'] - par['w'])**2 +
(par['zH'] + par['rF'])**2)**(0.5)
par['IHyy'] = inertia.compound_pendulum_inertia(mp['mH'], mp['g'],
lH, mp['TcH1'])
# calculate the stiffness of the torsional pendulum
IPxx, IPyy, IPzz = inertia.tube_inertia(mp['lP'], mp['mP'],
mp['dP'] / 2., 0.)
torStiff = inertia.torsional_pendulum_stiffness(IPyy, mp['TtP1'])
#print("Torsional pendulum stiffness:", torStiff)
# calculate the wheel x/z inertias
par['IFxx'] = inertia.tor_inertia(torStiff, mp['TtF1'])
par['IRxx'] = inertia.tor_inertia(torStiff, mp['TtR1'])
try:
par['IDxx'] = inertia.tor_inertia(torStiff, mp['TtD1']) - par['IFxx']
except KeyError:
pass
pendulumInertias = {}
# calculate the in plane moments of inertia
for part, slopeSet in slopes.items():
# the number of orientations for this part
numOrien = len(slopeSet)
# intialize arrays to store the inertia values and orientation angles
penInertia = np.zeros(numOrien, dtype=object)
beta = np.array(betas[part])
# fill arrays of the inertias
for i in range(numOrien):
penInertia[i] = inertia.tor_inertia(torStiff, mp['Tt' + part + str(i + 1)])
# store these inertias
pendulumInertias[part] = list(penInertia)
inert = inertia.inertia_components(penInertia, beta)
for i, axis in enumerate(['xx', 'xz', 'zz']):
par['I' + part + axis] = inert[i]
if forkIsSplit:
# combine the moments of inertia to find the total handlebar/fork MoI
IG = inertia.part_inertia_tensor(par, 'G')
IS = inertia.part_inertia_tensor(par, 'S')
# columns are parts, rows = x, y, z
coordinates = np.array([[par['xG'], par['xS']],
[0., 0.],
[par['zG'], par['zS']]])
masses = | np.array([par['mG'], par['mS']]) | numpy.array |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.